problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---
gh_patches_debug_20489 | rasdani/github-patches | git_diff | geopandas__geopandas-1220 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENH: Carry original schema and use it during to_file
Following the discussion in #1185, I have drafted a PR which saves schema during `read_file` to `gdf.schema` and then during `to_file` checks if the original schema for each column is still applicable. If so, it uses it; if not, it infers a new one based on the dtype, as we do now.
I am pretty sure that there will be some corner cases which are not covered here, but I wanted to have this PR opened so we can discuss the implementation. But for the cases described in #1185 and earlier in #177, this should work.
```
path = gpd.datasets.get_path('nybb')
gdf = gpd.read_file(path)
gdf.schema # original schema from fiona
{'properties': OrderedDict([('BoroCode', 'int:4'),
('BoroName', 'str:32'),
('Shape_Leng', 'float:19.11'),
('Shape_Area', 'float:19.11')]),
'geometry': 'Polygon'}
```
```
gpd.io.file.infer_schema(gdf)
{'geometry': 'MultiPolygon',
'properties': OrderedDict([('BoroCode', 'int:4'),
('BoroName', 'str:32'),
('Shape_Leng', 'float:19.11'),
('Shape_Area', 'float:19.11')])}
```
On master:
```
gpd.io.file.infer_schema(gdf)
{'geometry': 'MultiPolygon',
'properties': OrderedDict([('BoroCode', 'int'),
('BoroName', 'str'),
('Shape_Leng', 'float'),
('Shape_Area', 'float')])}
```
Closes #1185
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/io/file.py`
Content:
```
1 from distutils.version import LooseVersion
2
3 import numpy as np
4
5 import fiona
6
7 from geopandas import GeoDataFrame, GeoSeries
8
9 try:
10 from fiona import Env as fiona_env
11 except ImportError:
12 from fiona import drivers as fiona_env
13 # Adapted from pandas.io.common
14 from urllib.request import urlopen as _urlopen
15 from urllib.parse import urlparse as parse_url
16 from urllib.parse import uses_relative, uses_netloc, uses_params
17
18 _FIONA18 = LooseVersion(fiona.__version__) >= LooseVersion("1.8")
19
20
21 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
22 _VALID_URLS.discard("")
23
24
25 def _is_url(url):
26 """Check to see if *url* has a valid protocol."""
27 try:
28 return parse_url(url).scheme in _VALID_URLS
29 except Exception:
30 return False
31
32
33 def read_file(filename, bbox=None, **kwargs):
34 """
35 Returns a GeoDataFrame from a file or URL.
36
37 Parameters
38 ----------
39 filename: str
40 Either the absolute or relative path to the file or URL to
41 be opened.
42 bbox : tuple | GeoDataFrame or GeoSeries, default None
43 Filter features by given bounding box, GeoSeries, or GeoDataFrame.
44 CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.
45 **kwargs:
46 Keyword args to be passed to the `open` or `BytesCollection` method
47 in the fiona library when opening the file. For more information on
48 possible keywords, type:
49 ``import fiona; help(fiona.open)``
50
51 Examples
52 --------
53 >>> df = geopandas.read_file("nybb.shp")
54
55 Returns
56 -------
57 geodataframe : GeoDataFrame
58 """
59 if _is_url(filename):
60 req = _urlopen(filename)
61 path_or_bytes = req.read()
62 reader = fiona.BytesCollection
63 else:
64 path_or_bytes = filename
65 reader = fiona.open
66
67 with fiona_env():
68 with reader(path_or_bytes, **kwargs) as features:
69
70 # In a future Fiona release the crs attribute of features will
71 # no longer be a dict. The following code will be both forward
72 # and backward compatible.
73 if hasattr(features.crs, "to_dict"):
74 crs = features.crs.to_dict()
75 else:
76 crs = features.crs
77
78 if bbox is not None:
79 if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):
80 bbox = tuple(bbox.to_crs(crs).total_bounds)
81 assert len(bbox) == 4
82 f_filt = features.filter(bbox=bbox)
83 else:
84 f_filt = features
85
86 columns = list(features.meta["schema"]["properties"]) + ["geometry"]
87 gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)
88
89 return gdf
90
91
92 def to_file(df, filename, driver="ESRI Shapefile", schema=None, **kwargs):
93 """
94 Write this GeoDataFrame to an OGR data source
95
96 A dictionary of supported OGR providers is available via:
97 >>> import fiona
98 >>> fiona.supported_drivers
99
100 Parameters
101 ----------
102 df : GeoDataFrame to be written
103 filename : string
104 File path or file handle to write to.
105 driver : string, default 'ESRI Shapefile'
106 The OGR format driver used to write the vector file.
107 schema : dict, default None
108 If specified, the schema dictionary is passed to Fiona to
109 better control how the file is written. If None, GeoPandas
110 will determine the schema based on each column's dtype
111
112 The *kwargs* are passed to fiona.open and can be used to write
113 to multi-layer data, store data within archives (zip files), etc.
114 The path may specify a fiona VSI scheme.
115 """
116 if schema is None:
117 schema = infer_schema(df)
118 with fiona_env():
119 with fiona.open(
120 filename, "w", driver=driver, crs=df.crs, schema=schema, **kwargs
121 ) as colxn:
122 colxn.writerecords(df.iterfeatures())
123
124
125 def infer_schema(df):
126 try:
127 from collections import OrderedDict
128 except ImportError:
129 from ordereddict import OrderedDict
130
131 def convert_type(column, in_type):
132 if in_type == object:
133 return "str"
134 if in_type.name.startswith("datetime64"):
135 # numpy datetime type regardless of frequency
136 return "datetime"
137 out_type = type(np.zeros(1, in_type).item()).__name__
138 if out_type == "long":
139 out_type = "int"
140 if not _FIONA18 and out_type == "bool":
141 raise ValueError(
142 'column "{}" is boolean type, '.format(column)
143 + "which is unsupported in file writing with fiona "
144 "< 1.8. Consider casting the column to int type."
145 )
146 return out_type
147
148 properties = OrderedDict(
149 [
150 (col, convert_type(col, _type))
151 for col, _type in zip(df.columns, df.dtypes)
152 if col != df._geometry_column_name
153 ]
154 )
155
156 if df.empty:
157 raise ValueError("Cannot write empty DataFrame to file.")
158
159 # Since https://github.com/Toblerity/Fiona/issues/446 resolution,
160 # Fiona allows a list of geometry types
161 geom_types = _geometry_types(df)
162
163 schema = {"geometry": geom_types, "properties": properties}
164
165 return schema
166
167
168 def _geometry_types(df):
169 """
170 Determine the geometry types in the GeoDataFrame for the schema.
171 """
172 if _FIONA18:
173 # Starting from Fiona 1.8, schema submitted to fiona to write a gdf
174 # can have mixed geometries:
175 # - 3D and 2D shapes can coexist in inferred schema
176 # - Shape and MultiShape types can (and must) coexist in inferred
177 # schema
178 geom_types_2D = df[~df.geometry.has_z].geometry.geom_type.unique()
179 geom_types_2D = [gtype for gtype in geom_types_2D if gtype is not None]
180 geom_types_3D = df[df.geometry.has_z].geometry.geom_type.unique()
181 geom_types_3D = ["3D " + gtype for gtype in geom_types_3D if gtype is not None]
182 geom_types = geom_types_3D + geom_types_2D
183
184 else:
185 # Before Fiona 1.8, schema submitted to write a gdf should have
186 # one single geometry type whenever possible:
187 # - 3D and 2D shapes cannot coexist in inferred schema
188 # - Shape and MultiShape can not coexist in inferred schema
189 geom_types = _geometry_types_back_compat(df)
190
191 if len(geom_types) == 0:
192 # Default geometry type supported by Fiona
193 # (Since https://github.com/Toblerity/Fiona/issues/446 resolution)
194 return "Unknown"
195
196 if len(geom_types) == 1:
197 geom_types = geom_types[0]
198
199 return geom_types
200
201
202 def _geometry_types_back_compat(df):
203 """
204 for backward compatibility with Fiona<1.8 only
205 """
206 unique_geom_types = df.geometry.geom_type.unique()
207 unique_geom_types = [gtype for gtype in unique_geom_types if gtype is not None]
208
209 # merge single and Multi types (eg Polygon and MultiPolygon)
210 unique_geom_types = [
211 gtype
212 for gtype in unique_geom_types
213 if not gtype.startswith("Multi") or gtype[5:] not in unique_geom_types
214 ]
215
216 if df.geometry.has_z.any():
217 # declare all geometries as 3D geometries
218 unique_geom_types = ["3D " + type for type in unique_geom_types]
219 # by default, all geometries are 2D geometries
220
221 return unique_geom_types
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geopandas/io/file.py b/geopandas/io/file.py
--- a/geopandas/io/file.py
+++ b/geopandas/io/file.py
@@ -123,10 +123,10 @@
def infer_schema(df):
- try:
- from collections import OrderedDict
- except ImportError:
- from ordereddict import OrderedDict
+ from collections import OrderedDict
+
+ # TODO: test pandas string type and boolean type once released
+ types = {"Int64": "int", "string": "str", "boolean": "bool"}
def convert_type(column, in_type):
if in_type == object:
@@ -134,7 +134,10 @@
if in_type.name.startswith("datetime64"):
# numpy datetime type regardless of frequency
return "datetime"
- out_type = type(np.zeros(1, in_type).item()).__name__
+ if str(in_type) in types:
+ out_type = types[str(in_type)]
+ else:
+ out_type = type(np.zeros(1, in_type).item()).__name__
if out_type == "long":
out_type = "int"
if not _FIONA18 and out_type == "bool":
| {"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -123,10 +123,10 @@\n \n \n def infer_schema(df):\n- try:\n- from collections import OrderedDict\n- except ImportError:\n- from ordereddict import OrderedDict\n+ from collections import OrderedDict\n+\n+ # TODO: test pandas string type and boolean type once released\n+ types = {\"Int64\": \"int\", \"string\": \"str\", \"boolean\": \"bool\"}\n \n def convert_type(column, in_type):\n if in_type == object:\n@@ -134,7 +134,10 @@\n if in_type.name.startswith(\"datetime64\"):\n # numpy datetime type regardless of frequency\n return \"datetime\"\n- out_type = type(np.zeros(1, in_type).item()).__name__\n+ if str(in_type) in types:\n+ out_type = types[str(in_type)]\n+ else:\n+ out_type = type(np.zeros(1, in_type).item()).__name__\n if out_type == \"long\":\n out_type = \"int\"\n if not _FIONA18 and out_type == \"bool\":\n", "issue": "ENH: Carry original schema and use it during to_file\nFollowing the discussion in #1185, I have drafted a PR which saves schema during `read_file` to `gdf.schema` and then during `to_file` checks if the original schema for each column is still applicable. If so, it uses it, if not it infers new one based on the dtype as we do it now.\r\n\r\nI am pretty sure that there will be some corner cases which are not covered here, but I wanted to have this PR opened so we can discuss the implementation. But for the cases described in #1185 and earlier in #177, this should work.\r\n\r\n```\r\npath = gpd.datasets.get_path('nybb')\r\ngdf = gpd.read_file(path)\r\ngdf.schema # original schema from fiona\r\n\r\n{'properties': OrderedDict([('BoroCode', 'int:4'),\r\n ('BoroName', 'str:32'),\r\n ('Shape_Leng', 'float:19.11'),\r\n ('Shape_Area', 'float:19.11')]),\r\n 'geometry': 'Polygon'}\r\n```\r\n\r\n```\r\ngpd.io.file.infer_schema(gdf)\r\n{'geometry': 'MultiPolygon',\r\n\r\n 'properties': OrderedDict([('BoroCode', 'int:4'),\r\n ('BoroName', 'str:32'),\r\n ('Shape_Leng', 'float:19.11'),\r\n ('Shape_Area', 'float:19.11')])}\r\n```\r\n\r\nOn master:\r\n```\r\ngpd.io.file.infer_schema(gdf)\r\n\r\n{'geometry': 'MultiPolygon',\r\n 'properties': OrderedDict([('BoroCode', 'int'),\r\n ('BoroName', 'str'),\r\n ('Shape_Leng', 'float'),\r\n ('Shape_Area', 'float')])}\r\n```\r\n\r\nCloses #1185 \n", "before_files": [{"content": "from distutils.version import LooseVersion\n\nimport numpy as np\n\nimport fiona\n\nfrom geopandas import GeoDataFrame, GeoSeries\n\ntry:\n from fiona import Env as fiona_env\nexcept ImportError:\n from fiona import drivers as fiona_env\n# Adapted from pandas.io.common\nfrom urllib.request import urlopen as _urlopen\nfrom urllib.parse import urlparse as parse_url\nfrom urllib.parse import uses_relative, uses_netloc, uses_params\n\n_FIONA18 = LooseVersion(fiona.__version__) >= LooseVersion(\"1.8\")\n\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard(\"\")\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except Exception:\n return False\n\n\ndef read_file(filename, bbox=None, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n bbox : tuple | GeoDataFrame or GeoSeries, default None\n Filter features by given bounding box, GeoSeries, or GeoDataFrame.\n CRS mis-matches are resolved if given a 
GeoSeries or GeoDataFrame.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n\n with fiona_env():\n with reader(path_or_bytes, **kwargs) as features:\n\n # In a future Fiona release the crs attribute of features will\n # no longer be a dict. The following code will be both forward\n # and backward compatible.\n if hasattr(features.crs, \"to_dict\"):\n crs = features.crs.to_dict()\n else:\n crs = features.crs\n\n if bbox is not None:\n if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):\n bbox = tuple(bbox.to_crs(crs).total_bounds)\n assert len(bbox) == 4\n f_filt = features.filter(bbox=bbox)\n else:\n f_filt = features\n\n columns = list(features.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None, **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n The path may specify a fiona VSI scheme.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n with fiona_env():\n with fiona.open(\n filename, \"w\", driver=driver, crs=df.crs, schema=schema, **kwargs\n ) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(column, in_type):\n if in_type == object:\n return \"str\"\n if in_type.name.startswith(\"datetime64\"):\n # numpy datetime type regardless of frequency\n return \"datetime\"\n out_type = type(np.zeros(1, in_type).item()).__name__\n if out_type == \"long\":\n out_type = \"int\"\n if not _FIONA18 and out_type == \"bool\":\n raise ValueError(\n 'column \"{}\" is boolean type, '.format(column)\n + \"which is unsupported in file writing with fiona \"\n \"< 1.8. 
Consider casting the column to int type.\"\n )\n return out_type\n\n properties = OrderedDict(\n [\n (col, convert_type(col, _type))\n for col, _type in zip(df.columns, df.dtypes)\n if col != df._geometry_column_name\n ]\n )\n\n if df.empty:\n raise ValueError(\"Cannot write empty DataFrame to file.\")\n\n # Since https://github.com/Toblerity/Fiona/issues/446 resolution,\n # Fiona allows a list of geometry types\n geom_types = _geometry_types(df)\n\n schema = {\"geometry\": geom_types, \"properties\": properties}\n\n return schema\n\n\ndef _geometry_types(df):\n \"\"\"\n Determine the geometry types in the GeoDataFrame for the schema.\n \"\"\"\n if _FIONA18:\n # Starting from Fiona 1.8, schema submitted to fiona to write a gdf\n # can have mixed geometries:\n # - 3D and 2D shapes can coexist in inferred schema\n # - Shape and MultiShape types can (and must) coexist in inferred\n # schema\n geom_types_2D = df[~df.geometry.has_z].geometry.geom_type.unique()\n geom_types_2D = [gtype for gtype in geom_types_2D if gtype is not None]\n geom_types_3D = df[df.geometry.has_z].geometry.geom_type.unique()\n geom_types_3D = [\"3D \" + gtype for gtype in geom_types_3D if gtype is not None]\n geom_types = geom_types_3D + geom_types_2D\n\n else:\n # Before Fiona 1.8, schema submitted to write a gdf should have\n # one single geometry type whenever possible:\n # - 3D and 2D shapes cannot coexist in inferred schema\n # - Shape and MultiShape can not coexist in inferred schema\n geom_types = _geometry_types_back_compat(df)\n\n if len(geom_types) == 0:\n # Default geometry type supported by Fiona\n # (Since https://github.com/Toblerity/Fiona/issues/446 resolution)\n return \"Unknown\"\n\n if len(geom_types) == 1:\n geom_types = geom_types[0]\n\n return geom_types\n\n\ndef _geometry_types_back_compat(df):\n \"\"\"\n for backward compatibility with Fiona<1.8 only\n \"\"\"\n unique_geom_types = df.geometry.geom_type.unique()\n unique_geom_types = [gtype for gtype in unique_geom_types if gtype is not None]\n\n # merge single and Multi types (eg Polygon and MultiPolygon)\n unique_geom_types = [\n gtype\n for gtype in unique_geom_types\n if not gtype.startswith(\"Multi\") or gtype[5:] not in unique_geom_types\n ]\n\n if df.geometry.has_z.any():\n # declare all geometries as 3D geometries\n unique_geom_types = [\"3D \" + type for type in unique_geom_types]\n # by default, all geometries are 2D geometries\n\n return unique_geom_types\n", "path": "geopandas/io/file.py"}], "after_files": [{"content": "from distutils.version import LooseVersion\n\nimport numpy as np\n\nimport fiona\n\nfrom geopandas import GeoDataFrame, GeoSeries\n\ntry:\n from fiona import Env as fiona_env\nexcept ImportError:\n from fiona import drivers as fiona_env\n# Adapted from pandas.io.common\nfrom urllib.request import urlopen as _urlopen\nfrom urllib.parse import urlparse as parse_url\nfrom urllib.parse import uses_relative, uses_netloc, uses_params\n\n_FIONA18 = LooseVersion(fiona.__version__) >= LooseVersion(\"1.8\")\n\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard(\"\")\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except Exception:\n return False\n\n\ndef read_file(filename, bbox=None, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n bbox : tuple | GeoDataFrame or GeoSeries, 
default None\n Filter features by given bounding box, GeoSeries, or GeoDataFrame.\n CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n\n with fiona_env():\n with reader(path_or_bytes, **kwargs) as features:\n\n # In a future Fiona release the crs attribute of features will\n # no longer be a dict. The following code will be both forward\n # and backward compatible.\n if hasattr(features.crs, \"to_dict\"):\n crs = features.crs.to_dict()\n else:\n crs = features.crs\n\n if bbox is not None:\n if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):\n bbox = tuple(bbox.to_crs(crs).total_bounds)\n assert len(bbox) == 4\n f_filt = features.filter(bbox=bbox)\n else:\n f_filt = features\n\n columns = list(features.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None, **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n The path may specify a fiona VSI scheme.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n with fiona_env():\n with fiona.open(\n filename, \"w\", driver=driver, crs=df.crs, schema=schema, **kwargs\n ) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n from collections import OrderedDict\n\n # TODO: test pandas string type and boolean type once released\n types = {\"Int64\": \"int\", \"string\": \"str\", \"boolean\": \"bool\"}\n\n def convert_type(column, in_type):\n if in_type == object:\n return \"str\"\n if in_type.name.startswith(\"datetime64\"):\n # numpy datetime type regardless of frequency\n return \"datetime\"\n if str(in_type) in types:\n out_type = types[str(in_type)]\n else:\n out_type = type(np.zeros(1, in_type).item()).__name__\n if out_type == \"long\":\n out_type = \"int\"\n if not _FIONA18 and out_type == \"bool\":\n raise ValueError(\n 'column \"{}\" is boolean type, '.format(column)\n + \"which is unsupported in file writing with fiona \"\n \"< 1.8. 
Consider casting the column to int type.\"\n )\n return out_type\n\n properties = OrderedDict(\n [\n (col, convert_type(col, _type))\n for col, _type in zip(df.columns, df.dtypes)\n if col != df._geometry_column_name\n ]\n )\n\n if df.empty:\n raise ValueError(\"Cannot write empty DataFrame to file.\")\n\n # Since https://github.com/Toblerity/Fiona/issues/446 resolution,\n # Fiona allows a list of geometry types\n geom_types = _geometry_types(df)\n\n schema = {\"geometry\": geom_types, \"properties\": properties}\n\n return schema\n\n\ndef _geometry_types(df):\n \"\"\"\n Determine the geometry types in the GeoDataFrame for the schema.\n \"\"\"\n if _FIONA18:\n # Starting from Fiona 1.8, schema submitted to fiona to write a gdf\n # can have mixed geometries:\n # - 3D and 2D shapes can coexist in inferred schema\n # - Shape and MultiShape types can (and must) coexist in inferred\n # schema\n geom_types_2D = df[~df.geometry.has_z].geometry.geom_type.unique()\n geom_types_2D = [gtype for gtype in geom_types_2D if gtype is not None]\n geom_types_3D = df[df.geometry.has_z].geometry.geom_type.unique()\n geom_types_3D = [\"3D \" + gtype for gtype in geom_types_3D if gtype is not None]\n geom_types = geom_types_3D + geom_types_2D\n\n else:\n # Before Fiona 1.8, schema submitted to write a gdf should have\n # one single geometry type whenever possible:\n # - 3D and 2D shapes cannot coexist in inferred schema\n # - Shape and MultiShape can not coexist in inferred schema\n geom_types = _geometry_types_back_compat(df)\n\n if len(geom_types) == 0:\n # Default geometry type supported by Fiona\n # (Since https://github.com/Toblerity/Fiona/issues/446 resolution)\n return \"Unknown\"\n\n if len(geom_types) == 1:\n geom_types = geom_types[0]\n\n return geom_types\n\n\ndef _geometry_types_back_compat(df):\n \"\"\"\n for backward compatibility with Fiona<1.8 only\n \"\"\"\n unique_geom_types = df.geometry.geom_type.unique()\n unique_geom_types = [gtype for gtype in unique_geom_types if gtype is not None]\n\n # merge single and Multi types (eg Polygon and MultiPolygon)\n unique_geom_types = [\n gtype\n for gtype in unique_geom_types\n if not gtype.startswith(\"Multi\") or gtype[5:] not in unique_geom_types\n ]\n\n if df.geometry.has_z.any():\n # declare all geometries as 3D geometries\n unique_geom_types = [\"3D \" + type for type in unique_geom_types]\n # by default, all geometries are 2D geometries\n\n return unique_geom_types\n", "path": "geopandas/io/file.py"}]} | 2,970 | 279 |
gh_patches_debug_14077 | rasdani/github-patches | git_diff | WordPress__openverse-api-476 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace PYTHON_ENV with ENVIRONMENT
## Description
<!-- Concisely describe the bug. -->
Our deployment infrastructure already configures an `ENVIRONMENT` variable and it is already used in the ingestion server code. Rather than introduce a new one that duplicates that, we can just use that everywhere.
Making this change will solve the issue we're seeing in Sentry, where staging is reporting itself as `production`.
## Resolution
<!-- Replace the [ ] with [x] to check the box. -->
- [x] 🙋 I would be interested in resolving this bug.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/catalog/settings.py`
Content:
```
1 """
2 Django settings for catalog project.
3
4 Generated by 'django-admin startproject' using Django 2.0.5.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/2.0/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/2.0/ref/settings/
11 """
12
13 from pathlib import Path
14 from socket import gethostbyname, gethostname
15
16 import sentry_sdk
17 from decouple import config
18 from sentry_sdk.integrations.django import DjangoIntegration
19
20 from .logger import LOGGING # noqa: F401
21
22
23 # Build paths inside the project like this: BASE_DIR.join('dir', 'subdir'...)
24 BASE_DIR = Path(__file__).resolve().parent.parent
25
26 # Where to collect static files in production/development deployments
27 STATIC_ROOT = "/var/api_static_content/static"
28
29 # Logo uploads
30 MEDIA_ROOT = "/var/api_media/"
31 MEDIA_URL = "/media/"
32
33 # Quick-start development settings - unsuitable for production
34 # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
35
36 # SECURITY WARNING: keep the secret key used in production secret!
37 SECRET_KEY = config("DJANGO_SECRET_KEY") # required
38
39 # SECURITY WARNING: don't run with debug turned on in production!
40 DEBUG = config("DJANGO_DEBUG_ENABLED", default=False, cast=bool)
41
42 PYTHON_ENV = config("PYTHON_ENV", default="production")
43
44 ALLOWED_HOSTS = [
45 "api-dev.openverse.engineering",
46 "api.openverse.engineering",
47 gethostname(),
48 gethostbyname(gethostname()),
49 ]
50
51 if lb_url := config("LOAD_BALANCER_URL", default=""):
52 ALLOWED_HOSTS.append(lb_url)
53
54 if DEBUG:
55 ALLOWED_HOSTS += [
56 "localhost",
57 "127.0.0.1",
58 "0.0.0.0",
59 ]
60
61 # Domains that shortened links may point to
62 SHORT_URL_WHITELIST = {
63 "api-dev.openverse.engineering",
64 "api.openverse.engineering",
65 "localhost:8000",
66 }
67 SHORT_URL_PATH_WHITELIST = ["/v1/list", "/v1/images/"]
68
69 USE_S3 = config("USE_S3", default=False, cast=bool)
70
71 # Application definition
72
73 INSTALLED_APPS = [
74 "catalog",
75 "catalog.api",
76 "drf_yasg",
77 "django.contrib.admin",
78 "django.contrib.auth",
79 "django.contrib.contenttypes",
80 "django.contrib.sessions",
81 "django.contrib.messages",
82 "django.contrib.staticfiles",
83 "oauth2_provider",
84 "rest_framework",
85 "corsheaders",
86 "sslserver",
87 ]
88
89 if USE_S3:
90 DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
91 AWS_STORAGE_BUCKET_NAME = config("LOGOS_BUCKET", default="openverse_api-logos-prod")
92 AWS_S3_SIGNATURE_VERSION = "s3v4"
93 INSTALLED_APPS.append("storages")
94
95 MIDDLEWARE = [
96 "django.middleware.security.SecurityMiddleware",
97 "django.contrib.sessions.middleware.SessionMiddleware",
98 "corsheaders.middleware.CorsMiddleware",
99 "django.middleware.common.CommonMiddleware",
100 "django.middleware.csrf.CsrfViewMiddleware",
101 "django.contrib.auth.middleware.AuthenticationMiddleware",
102 "django.contrib.messages.middleware.MessageMiddleware",
103 "django.middleware.clickjacking.XFrameOptionsMiddleware",
104 "oauth2_provider.middleware.OAuth2TokenMiddleware",
105 ]
106
107 SWAGGER_SETTINGS = {"SECURITY_DEFINITIONS": {}}
108
109 OAUTH2_PROVIDER = {
110 "SCOPES": {
111 "read": "Read scope",
112 "write": "Write scope",
113 }
114 }
115
116 OAUTH2_PROVIDER_APPLICATION_MODEL = "api.ThrottledApplication"
117
118 REST_FRAMEWORK = {
119 "DEFAULT_AUTHENTICATION_CLASSES": (
120 "oauth2_provider.contrib.rest_framework.OAuth2Authentication",
121 ),
122 "DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.URLPathVersioning",
123 "DEFAULT_RENDERER_CLASSES": (
124 "rest_framework.renderers.JSONRenderer",
125 "rest_framework.renderers.BrowsableAPIRenderer",
126 "rest_framework_xml.renderers.XMLRenderer",
127 ),
128 "DEFAULT_THROTTLE_CLASSES": (
129 "catalog.api.utils.throttle.BurstRateThrottle",
130 "catalog.api.utils.throttle.SustainedRateThrottle",
131 "catalog.api.utils.throttle.OAuth2IdThrottleSustainedRate",
132 "catalog.api.utils.throttle.OAuth2IdThrottleBurstRate",
133 "catalog.api.utils.throttle.EnhancedOAuth2IdThrottleSustainedRate",
134 "catalog.api.utils.throttle.EnhancedOAuth2IdThrottleBurstRate",
135 ),
136 "DEFAULT_THROTTLE_RATES": {
137 "anon_burst": "60/min",
138 "anon_sustained": "5000/day",
139 "oauth2_client_credentials_sustained": "10000/day",
140 "oauth2_client_credentials_burst": "100/min",
141 "enhanced_oauth2_client_credentials_sustained": "20000/day",
142 "enhanced_oauth2_client_credentials_burst": "200/min",
143 },
144 "EXCEPTION_HANDLER": "catalog.api.utils.exceptions.exception_handler",
145 }
146
147 if config("DISABLE_GLOBAL_THROTTLING", default=True, cast=bool):
148 del REST_FRAMEWORK["DEFAULT_THROTTLE_RATES"]
149 del REST_FRAMEWORK["DEFAULT_THROTTLE_CLASSES"]
150
151 REDIS_HOST = config("REDIS_HOST", default="localhost")
152 REDIS_PORT = config("REDIS_PORT", default=6379, cast=int)
153 REDIS_PASSWORD = config("REDIS_PASSWORD", default="")
154 CACHES = {
155 # Site cache writes to 'default'
156 "default": {
157 "BACKEND": "django_redis.cache.RedisCache",
158 "LOCATION": f"redis://{REDIS_HOST}:{REDIS_PORT}/0",
159 "OPTIONS": {
160 "CLIENT_CLASS": "django_redis.client.DefaultClient",
161 },
162 },
163 # For rapidly changing stats that we don't want to hammer the database with
164 "traffic_stats": {
165 "BACKEND": "django_redis.cache.RedisCache",
166 "LOCATION": f"redis://{REDIS_HOST}:{REDIS_PORT}/1",
167 "OPTIONS": {
168 "CLIENT_CLASS": "django_redis.client.DefaultClient",
169 },
170 },
171 # For ensuring consistency among multiple Django workers and servers.
172 # Used by Redlock.
173 "locks": {
174 "BACKEND": "django_redis.cache.RedisCache",
175 "LOCATION": f"redis://{REDIS_HOST}:{REDIS_PORT}/2",
176 "OPTIONS": {
177 "CLIENT_CLASS": "django_redis.client.DefaultClient",
178 },
179 },
180 }
181
182 # Produce CC-hosted thumbnails dynamically through a proxy.
183 THUMBNAIL_PROXY_URL = config("THUMBNAIL_PROXY_URL", default="http://localhost:8222")
184
185 THUMBNAIL_WIDTH_PX = 600
186
187 AUTHENTICATION_BACKENDS = (
188 "oauth2_provider.backends.OAuth2Backend",
189 "django.contrib.auth.backends.ModelBackend",
190 )
191
192 ROOT_URLCONF = "catalog.urls"
193
194 TEMPLATES = [
195 {
196 "BACKEND": "django.template.backends.django.DjangoTemplates",
197 "DIRS": [BASE_DIR.joinpath("catalog", "templates")],
198 "APP_DIRS": True,
199 "OPTIONS": {
200 "context_processors": [
201 "django.template.context_processors.debug",
202 "django.template.context_processors.request",
203 "django.contrib.auth.context_processors.auth",
204 "django.contrib.messages.context_processors.messages",
205 ],
206 },
207 },
208 ]
209
210 WSGI_APPLICATION = "catalog.wsgi.application"
211
212 # Database
213 # https://docs.djangoproject.com/en/2.0/ref/settings/#databases
214
215 DATABASES = {
216 "default": {
217 "ENGINE": "django.db.backends.postgresql",
218 "HOST": config("DJANGO_DATABASE_HOST", default="localhost"),
219 "PORT": config("DJANGO_DATABASE_PORT", default=5432, cast=int),
220 "USER": config("DJANGO_DATABASE_USER", default="deploy"),
221 "PASSWORD": config("DJANGO_DATABASE_PASSWORD", default="deploy"),
222 "NAME": config("DJANGO_DATABASE_NAME", default="openledger"),
223 },
224 "upstream": {
225 "ENGINE": "django.db.backends.postgresql",
226 "HOST": config("UPSTREAM_DATABASE_HOST", default="localhost"),
227 "PORT": config("UPSTREAM_DATABASE_PORT", default=5433, cast=int),
228 "USER": config("UPSTREAM_DATABASE_USER", default="deploy"),
229 "PASSWORD": config("UPSTREAM_DATABASE_PASSWORD", default="deploy"),
230 "NAME": config("UPSTREAM_DATABASE_NAME", default="openledger"),
231 },
232 }
233
234 # Password validation
235 # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
236
237 AUTH_PASSWORD_VALIDATORS = [
238 {
239 "NAME": "django.contrib.auth.password_validation"
240 ".UserAttributeSimilarityValidator",
241 },
242 {
243 "NAME": "django.contrib.auth.password_validation" ".MinimumLengthValidator",
244 },
245 {
246 "NAME": "django.contrib.auth.password_validation" ".CommonPasswordValidator",
247 },
248 {
249 "NAME": "django.contrib.auth.password_validation" ".NumericPasswordValidator",
250 },
251 ]
252
253 # Internationalization
254 # https://docs.djangoproject.com/en/2.0/topics/i18n/
255
256 LANGUAGE_CODE = "en-us"
257
258 TIME_ZONE = "UTC"
259
260 USE_I18N = True
261
262 USE_L10N = True
263
264 USE_TZ = True
265
266 # Static files (CSS, JavaScript, Images)
267 # https://docs.djangoproject.com/en/2.0/howto/static-files/
268
269 STATIC_URL = "/static/"
270
271 # Allow anybody to access the API from any domain
272 CORS_ORIGIN_ALLOW_ALL = True
273
274 # The version of the API. We follow the semantic version specification.
275 API_VERSION = config("SEMANTIC_VERSION", default="Version not specified")
276
277 # The contact email of the Openverse team
278 CONTACT_EMAIL = config("CONTACT_EMAIL", default="[email protected]")
279
280 WATERMARK_ENABLED = config("WATERMARK_ENABLED", default=False, cast=bool)
281
282 ELASTICSEARCH_URL = config("ELASTICSEARCH_URL", default="localhost")
283 ELASTICSEARCH_PORT = config("ELASTICSEARCH_PORT", default=9200, cast=int)
284 ELASTICSEARCH_AWS_REGION = config("ELASTICSEARCH_AWS_REGION", default="us-east-1")
285
286 # Additional settings for dev/prod environments
287 AWS_ACCESS_KEY_ID = config("AWS_ACCESS_KEY_ID", default="")
288 AWS_SECRET_ACCESS_KEY = config("AWS_SECRET_ACCESS_KEY", default="")
289
290 EMAIL_SENDER = config("EMAIL_SENDER", default="")
291 EMAIL_HOST = config("EMAIL_HOST", default="")
292 EMAIL_PORT = config("EMAIL_PORT", default=25, cast=int)
293 EMAIL_HOST_USER = config("EMAIL_HOST_USER", default="")
294 EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD", default="")
295 EMAIL_SUBJECT_PREFIX = "[noreply]"
296 EMAIL_USE_TLS = True
297
298 if EMAIL_HOST_USER or EMAIL_HOST_PASSWORD:
299 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
300 else:
301 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
302
303 # Log full Elasticsearch response
304 VERBOSE_ES_RESPONSE = config("DEBUG_SCORES", default=False, cast=bool)
305
306 # Whether to boost results by authority and popularity
307 USE_RANK_FEATURES = config("USE_RANK_FEATURES", default=True, cast=bool)
308
309 SENTRY_DSN = config(
310 "SENTRY_DSN",
311 default="https://[email protected]/6107216",
312 )
313 SENTRY_SAMPLE_RATE = config("SENTRY_SAMPLE_RATE", default=1.0, cast=float)
314
315 if not DEBUG:
316 sentry_sdk.init(
317 dsn=SENTRY_DSN,
318 integrations=[DjangoIntegration()],
319 traces_sample_rate=SENTRY_SAMPLE_RATE,
320 send_default_pii=False,
321 environment=PYTHON_ENV,
322 )
323
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/api/catalog/settings.py b/api/catalog/settings.py
--- a/api/catalog/settings.py
+++ b/api/catalog/settings.py
@@ -39,7 +39,7 @@
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config("DJANGO_DEBUG_ENABLED", default=False, cast=bool)
-PYTHON_ENV = config("PYTHON_ENV", default="production")
+ENVIRONMENT = config("ENVIRONMENT", default="local")
ALLOWED_HOSTS = [
"api-dev.openverse.engineering",
@@ -318,5 +318,5 @@
integrations=[DjangoIntegration()],
traces_sample_rate=SENTRY_SAMPLE_RATE,
send_default_pii=False,
- environment=PYTHON_ENV,
+ environment=ENVIRONMENT,
)
| {"golden_diff": "diff --git a/api/catalog/settings.py b/api/catalog/settings.py\n--- a/api/catalog/settings.py\n+++ b/api/catalog/settings.py\n@@ -39,7 +39,7 @@\n # SECURITY WARNING: don't run with debug turned on in production!\n DEBUG = config(\"DJANGO_DEBUG_ENABLED\", default=False, cast=bool)\n \n-PYTHON_ENV = config(\"PYTHON_ENV\", default=\"production\")\n+ENVIRONMENT = config(\"ENVIRONMENT\", default=\"local\")\n \n ALLOWED_HOSTS = [\n \"api-dev.openverse.engineering\",\n@@ -318,5 +318,5 @@\n integrations=[DjangoIntegration()],\n traces_sample_rate=SENTRY_SAMPLE_RATE,\n send_default_pii=False,\n- environment=PYTHON_ENV,\n+ environment=ENVIRONMENT,\n )\n", "issue": "Replace PYTHON_ENV with ENVIRONMENT\n## Description\r\n<!-- Concisely describe the bug. -->\r\nOur deployment infrastructure already configures an `ENVIRONMENT` variable and it is already used in the ingestion server code. Rather than introduce a new one that duplicates that, we can just use that everywhere.\r\n\r\nMaking this change will solve the issue we're seeing Sentry where staging is reporting itself as `production`.\r\n\r\n## Resolution\r\n<!-- Replace the [ ] with [x] to check the box. -->\r\n- [x] \ud83d\ude4b I would be interested in resolving this bug.\r\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for catalog project.\n\nGenerated by 'django-admin startproject' using Django 2.0.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nfrom pathlib import Path\nfrom socket import gethostbyname, gethostname\n\nimport sentry_sdk\nfrom decouple import config\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nfrom .logger import LOGGING # noqa: F401\n\n\n# Build paths inside the project like this: BASE_DIR.join('dir', 'subdir'...)\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Where to collect static files in production/development deployments\nSTATIC_ROOT = \"/var/api_static_content/static\"\n\n# Logo uploads\nMEDIA_ROOT = \"/var/api_media/\"\nMEDIA_URL = \"/media/\"\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config(\"DJANGO_SECRET_KEY\") # required\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config(\"DJANGO_DEBUG_ENABLED\", default=False, cast=bool)\n\nPYTHON_ENV = config(\"PYTHON_ENV\", default=\"production\")\n\nALLOWED_HOSTS = [\n \"api-dev.openverse.engineering\",\n \"api.openverse.engineering\",\n gethostname(),\n gethostbyname(gethostname()),\n]\n\nif lb_url := config(\"LOAD_BALANCER_URL\", default=\"\"):\n ALLOWED_HOSTS.append(lb_url)\n\nif DEBUG:\n ALLOWED_HOSTS += [\n \"localhost\",\n \"127.0.0.1\",\n \"0.0.0.0\",\n ]\n\n# Domains that shortened links may point to\nSHORT_URL_WHITELIST = {\n \"api-dev.openverse.engineering\",\n \"api.openverse.engineering\",\n \"localhost:8000\",\n}\nSHORT_URL_PATH_WHITELIST = [\"/v1/list\", \"/v1/images/\"]\n\nUSE_S3 = config(\"USE_S3\", default=False, cast=bool)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"catalog\",\n \"catalog.api\",\n \"drf_yasg\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n 
\"oauth2_provider\",\n \"rest_framework\",\n \"corsheaders\",\n \"sslserver\",\n]\n\nif USE_S3:\n DEFAULT_FILE_STORAGE = \"storages.backends.s3boto3.S3Boto3Storage\"\n AWS_STORAGE_BUCKET_NAME = config(\"LOGOS_BUCKET\", default=\"openverse_api-logos-prod\")\n AWS_S3_SIGNATURE_VERSION = \"s3v4\"\n INSTALLED_APPS.append(\"storages\")\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"oauth2_provider.middleware.OAuth2TokenMiddleware\",\n]\n\nSWAGGER_SETTINGS = {\"SECURITY_DEFINITIONS\": {}}\n\nOAUTH2_PROVIDER = {\n \"SCOPES\": {\n \"read\": \"Read scope\",\n \"write\": \"Write scope\",\n }\n}\n\nOAUTH2_PROVIDER_APPLICATION_MODEL = \"api.ThrottledApplication\"\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"oauth2_provider.contrib.rest_framework.OAuth2Authentication\",\n ),\n \"DEFAULT_VERSIONING_CLASS\": \"rest_framework.versioning.URLPathVersioning\",\n \"DEFAULT_RENDERER_CLASSES\": (\n \"rest_framework.renderers.JSONRenderer\",\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n \"rest_framework_xml.renderers.XMLRenderer\",\n ),\n \"DEFAULT_THROTTLE_CLASSES\": (\n \"catalog.api.utils.throttle.BurstRateThrottle\",\n \"catalog.api.utils.throttle.SustainedRateThrottle\",\n \"catalog.api.utils.throttle.OAuth2IdThrottleSustainedRate\",\n \"catalog.api.utils.throttle.OAuth2IdThrottleBurstRate\",\n \"catalog.api.utils.throttle.EnhancedOAuth2IdThrottleSustainedRate\",\n \"catalog.api.utils.throttle.EnhancedOAuth2IdThrottleBurstRate\",\n ),\n \"DEFAULT_THROTTLE_RATES\": {\n \"anon_burst\": \"60/min\",\n \"anon_sustained\": \"5000/day\",\n \"oauth2_client_credentials_sustained\": \"10000/day\",\n \"oauth2_client_credentials_burst\": \"100/min\",\n \"enhanced_oauth2_client_credentials_sustained\": \"20000/day\",\n \"enhanced_oauth2_client_credentials_burst\": \"200/min\",\n },\n \"EXCEPTION_HANDLER\": \"catalog.api.utils.exceptions.exception_handler\",\n}\n\nif config(\"DISABLE_GLOBAL_THROTTLING\", default=True, cast=bool):\n del REST_FRAMEWORK[\"DEFAULT_THROTTLE_RATES\"]\n del REST_FRAMEWORK[\"DEFAULT_THROTTLE_CLASSES\"]\n\nREDIS_HOST = config(\"REDIS_HOST\", default=\"localhost\")\nREDIS_PORT = config(\"REDIS_PORT\", default=6379, cast=int)\nREDIS_PASSWORD = config(\"REDIS_PASSWORD\", default=\"\")\nCACHES = {\n # Site cache writes to 'default'\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://{REDIS_HOST}:{REDIS_PORT}/0\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n },\n # For rapidly changing stats that we don't want to hammer the database with\n \"traffic_stats\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://{REDIS_HOST}:{REDIS_PORT}/1\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n },\n # For ensuring consistency among multiple Django workers and servers.\n # Used by Redlock.\n \"locks\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://{REDIS_HOST}:{REDIS_PORT}/2\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n },\n}\n\n# Produce CC-hosted thumbnails 
dynamically through a proxy.\nTHUMBNAIL_PROXY_URL = config(\"THUMBNAIL_PROXY_URL\", default=\"http://localhost:8222\")\n\nTHUMBNAIL_WIDTH_PX = 600\n\nAUTHENTICATION_BACKENDS = (\n \"oauth2_provider.backends.OAuth2Backend\",\n \"django.contrib.auth.backends.ModelBackend\",\n)\n\nROOT_URLCONF = \"catalog.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [BASE_DIR.joinpath(\"catalog\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"catalog.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"HOST\": config(\"DJANGO_DATABASE_HOST\", default=\"localhost\"),\n \"PORT\": config(\"DJANGO_DATABASE_PORT\", default=5432, cast=int),\n \"USER\": config(\"DJANGO_DATABASE_USER\", default=\"deploy\"),\n \"PASSWORD\": config(\"DJANGO_DATABASE_PASSWORD\", default=\"deploy\"),\n \"NAME\": config(\"DJANGO_DATABASE_NAME\", default=\"openledger\"),\n },\n \"upstream\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"HOST\": config(\"UPSTREAM_DATABASE_HOST\", default=\"localhost\"),\n \"PORT\": config(\"UPSTREAM_DATABASE_PORT\", default=5433, cast=int),\n \"USER\": config(\"UPSTREAM_DATABASE_USER\", default=\"deploy\"),\n \"PASSWORD\": config(\"UPSTREAM_DATABASE_PASSWORD\", default=\"deploy\"),\n \"NAME\": config(\"UPSTREAM_DATABASE_NAME\", default=\"openledger\"),\n },\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation\"\n \".UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation\" \".MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation\" \".CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation\" \".NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\n# Allow anybody to access the API from any domain\nCORS_ORIGIN_ALLOW_ALL = True\n\n# The version of the API. 
We follow the semantic version specification.\nAPI_VERSION = config(\"SEMANTIC_VERSION\", default=\"Version not specified\")\n\n# The contact email of the Openverse team\nCONTACT_EMAIL = config(\"CONTACT_EMAIL\", default=\"[email protected]\")\n\nWATERMARK_ENABLED = config(\"WATERMARK_ENABLED\", default=False, cast=bool)\n\nELASTICSEARCH_URL = config(\"ELASTICSEARCH_URL\", default=\"localhost\")\nELASTICSEARCH_PORT = config(\"ELASTICSEARCH_PORT\", default=9200, cast=int)\nELASTICSEARCH_AWS_REGION = config(\"ELASTICSEARCH_AWS_REGION\", default=\"us-east-1\")\n\n# Additional settings for dev/prod environments\nAWS_ACCESS_KEY_ID = config(\"AWS_ACCESS_KEY_ID\", default=\"\")\nAWS_SECRET_ACCESS_KEY = config(\"AWS_SECRET_ACCESS_KEY\", default=\"\")\n\nEMAIL_SENDER = config(\"EMAIL_SENDER\", default=\"\")\nEMAIL_HOST = config(\"EMAIL_HOST\", default=\"\")\nEMAIL_PORT = config(\"EMAIL_PORT\", default=25, cast=int)\nEMAIL_HOST_USER = config(\"EMAIL_HOST_USER\", default=\"\")\nEMAIL_HOST_PASSWORD = config(\"EMAIL_HOST_PASSWORD\", default=\"\")\nEMAIL_SUBJECT_PREFIX = \"[noreply]\"\nEMAIL_USE_TLS = True\n\nif EMAIL_HOST_USER or EMAIL_HOST_PASSWORD:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n# Log full Elasticsearch response\nVERBOSE_ES_RESPONSE = config(\"DEBUG_SCORES\", default=False, cast=bool)\n\n# Whether to boost results by authority and popularity\nUSE_RANK_FEATURES = config(\"USE_RANK_FEATURES\", default=True, cast=bool)\n\nSENTRY_DSN = config(\n \"SENTRY_DSN\",\n default=\"https://[email protected]/6107216\",\n)\nSENTRY_SAMPLE_RATE = config(\"SENTRY_SAMPLE_RATE\", default=1.0, cast=float)\n\nif not DEBUG:\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration()],\n traces_sample_rate=SENTRY_SAMPLE_RATE,\n send_default_pii=False,\n environment=PYTHON_ENV,\n )\n", "path": "api/catalog/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for catalog project.\n\nGenerated by 'django-admin startproject' using Django 2.0.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nfrom pathlib import Path\nfrom socket import gethostbyname, gethostname\n\nimport sentry_sdk\nfrom decouple import config\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nfrom .logger import LOGGING # noqa: F401\n\n\n# Build paths inside the project like this: BASE_DIR.join('dir', 'subdir'...)\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Where to collect static files in production/development deployments\nSTATIC_ROOT = \"/var/api_static_content/static\"\n\n# Logo uploads\nMEDIA_ROOT = \"/var/api_media/\"\nMEDIA_URL = \"/media/\"\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config(\"DJANGO_SECRET_KEY\") # required\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config(\"DJANGO_DEBUG_ENABLED\", default=False, cast=bool)\n\nENVIRONMENT = config(\"ENVIRONMENT\", default=\"local\")\n\nALLOWED_HOSTS = [\n \"api-dev.openverse.engineering\",\n \"api.openverse.engineering\",\n gethostname(),\n gethostbyname(gethostname()),\n]\n\nif lb_url := config(\"LOAD_BALANCER_URL\", default=\"\"):\n 
ALLOWED_HOSTS.append(lb_url)\n\nif DEBUG:\n ALLOWED_HOSTS += [\n \"localhost\",\n \"127.0.0.1\",\n \"0.0.0.0\",\n ]\n\n# Domains that shortened links may point to\nSHORT_URL_WHITELIST = {\n \"api-dev.openverse.engineering\",\n \"api.openverse.engineering\",\n \"localhost:8000\",\n}\nSHORT_URL_PATH_WHITELIST = [\"/v1/list\", \"/v1/images/\"]\n\nUSE_S3 = config(\"USE_S3\", default=False, cast=bool)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"catalog\",\n \"catalog.api\",\n \"drf_yasg\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"oauth2_provider\",\n \"rest_framework\",\n \"corsheaders\",\n \"sslserver\",\n]\n\nif USE_S3:\n DEFAULT_FILE_STORAGE = \"storages.backends.s3boto3.S3Boto3Storage\"\n AWS_STORAGE_BUCKET_NAME = config(\"LOGOS_BUCKET\", default=\"openverse_api-logos-prod\")\n AWS_S3_SIGNATURE_VERSION = \"s3v4\"\n INSTALLED_APPS.append(\"storages\")\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"oauth2_provider.middleware.OAuth2TokenMiddleware\",\n]\n\nSWAGGER_SETTINGS = {\"SECURITY_DEFINITIONS\": {}}\n\nOAUTH2_PROVIDER = {\n \"SCOPES\": {\n \"read\": \"Read scope\",\n \"write\": \"Write scope\",\n }\n}\n\nOAUTH2_PROVIDER_APPLICATION_MODEL = \"api.ThrottledApplication\"\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"oauth2_provider.contrib.rest_framework.OAuth2Authentication\",\n ),\n \"DEFAULT_VERSIONING_CLASS\": \"rest_framework.versioning.URLPathVersioning\",\n \"DEFAULT_RENDERER_CLASSES\": (\n \"rest_framework.renderers.JSONRenderer\",\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n \"rest_framework_xml.renderers.XMLRenderer\",\n ),\n \"DEFAULT_THROTTLE_CLASSES\": (\n \"catalog.api.utils.throttle.BurstRateThrottle\",\n \"catalog.api.utils.throttle.SustainedRateThrottle\",\n \"catalog.api.utils.throttle.OAuth2IdThrottleSustainedRate\",\n \"catalog.api.utils.throttle.OAuth2IdThrottleBurstRate\",\n \"catalog.api.utils.throttle.EnhancedOAuth2IdThrottleSustainedRate\",\n \"catalog.api.utils.throttle.EnhancedOAuth2IdThrottleBurstRate\",\n ),\n \"DEFAULT_THROTTLE_RATES\": {\n \"anon_burst\": \"60/min\",\n \"anon_sustained\": \"5000/day\",\n \"oauth2_client_credentials_sustained\": \"10000/day\",\n \"oauth2_client_credentials_burst\": \"100/min\",\n \"enhanced_oauth2_client_credentials_sustained\": \"20000/day\",\n \"enhanced_oauth2_client_credentials_burst\": \"200/min\",\n },\n \"EXCEPTION_HANDLER\": \"catalog.api.utils.exceptions.exception_handler\",\n}\n\nif config(\"DISABLE_GLOBAL_THROTTLING\", default=True, cast=bool):\n del REST_FRAMEWORK[\"DEFAULT_THROTTLE_RATES\"]\n del REST_FRAMEWORK[\"DEFAULT_THROTTLE_CLASSES\"]\n\nREDIS_HOST = config(\"REDIS_HOST\", default=\"localhost\")\nREDIS_PORT = config(\"REDIS_PORT\", default=6379, cast=int)\nREDIS_PASSWORD = config(\"REDIS_PASSWORD\", default=\"\")\nCACHES = {\n # Site cache writes to 'default'\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://{REDIS_HOST}:{REDIS_PORT}/0\",\n \"OPTIONS\": {\n 
\"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n },\n # For rapidly changing stats that we don't want to hammer the database with\n \"traffic_stats\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://{REDIS_HOST}:{REDIS_PORT}/1\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n },\n # For ensuring consistency among multiple Django workers and servers.\n # Used by Redlock.\n \"locks\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://{REDIS_HOST}:{REDIS_PORT}/2\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n },\n}\n\n# Produce CC-hosted thumbnails dynamically through a proxy.\nTHUMBNAIL_PROXY_URL = config(\"THUMBNAIL_PROXY_URL\", default=\"http://localhost:8222\")\n\nTHUMBNAIL_WIDTH_PX = 600\n\nAUTHENTICATION_BACKENDS = (\n \"oauth2_provider.backends.OAuth2Backend\",\n \"django.contrib.auth.backends.ModelBackend\",\n)\n\nROOT_URLCONF = \"catalog.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [BASE_DIR.joinpath(\"catalog\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"catalog.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"HOST\": config(\"DJANGO_DATABASE_HOST\", default=\"localhost\"),\n \"PORT\": config(\"DJANGO_DATABASE_PORT\", default=5432, cast=int),\n \"USER\": config(\"DJANGO_DATABASE_USER\", default=\"deploy\"),\n \"PASSWORD\": config(\"DJANGO_DATABASE_PASSWORD\", default=\"deploy\"),\n \"NAME\": config(\"DJANGO_DATABASE_NAME\", default=\"openledger\"),\n },\n \"upstream\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"HOST\": config(\"UPSTREAM_DATABASE_HOST\", default=\"localhost\"),\n \"PORT\": config(\"UPSTREAM_DATABASE_PORT\", default=5433, cast=int),\n \"USER\": config(\"UPSTREAM_DATABASE_USER\", default=\"deploy\"),\n \"PASSWORD\": config(\"UPSTREAM_DATABASE_PASSWORD\", default=\"deploy\"),\n \"NAME\": config(\"UPSTREAM_DATABASE_NAME\", default=\"openledger\"),\n },\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation\"\n \".UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation\" \".MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation\" \".CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation\" \".NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\n# Allow anybody to access the API from any domain\nCORS_ORIGIN_ALLOW_ALL = True\n\n# The version of the API. 
We follow the semantic version specification.\nAPI_VERSION = config(\"SEMANTIC_VERSION\", default=\"Version not specified\")\n\n# The contact email of the Openverse team\nCONTACT_EMAIL = config(\"CONTACT_EMAIL\", default=\"[email protected]\")\n\nWATERMARK_ENABLED = config(\"WATERMARK_ENABLED\", default=False, cast=bool)\n\nELASTICSEARCH_URL = config(\"ELASTICSEARCH_URL\", default=\"localhost\")\nELASTICSEARCH_PORT = config(\"ELASTICSEARCH_PORT\", default=9200, cast=int)\nELASTICSEARCH_AWS_REGION = config(\"ELASTICSEARCH_AWS_REGION\", default=\"us-east-1\")\n\n# Additional settings for dev/prod environments\nAWS_ACCESS_KEY_ID = config(\"AWS_ACCESS_KEY_ID\", default=\"\")\nAWS_SECRET_ACCESS_KEY = config(\"AWS_SECRET_ACCESS_KEY\", default=\"\")\n\nEMAIL_SENDER = config(\"EMAIL_SENDER\", default=\"\")\nEMAIL_HOST = config(\"EMAIL_HOST\", default=\"\")\nEMAIL_PORT = config(\"EMAIL_PORT\", default=25, cast=int)\nEMAIL_HOST_USER = config(\"EMAIL_HOST_USER\", default=\"\")\nEMAIL_HOST_PASSWORD = config(\"EMAIL_HOST_PASSWORD\", default=\"\")\nEMAIL_SUBJECT_PREFIX = \"[noreply]\"\nEMAIL_USE_TLS = True\n\nif EMAIL_HOST_USER or EMAIL_HOST_PASSWORD:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n# Log full Elasticsearch response\nVERBOSE_ES_RESPONSE = config(\"DEBUG_SCORES\", default=False, cast=bool)\n\n# Whether to boost results by authority and popularity\nUSE_RANK_FEATURES = config(\"USE_RANK_FEATURES\", default=True, cast=bool)\n\nSENTRY_DSN = config(\n \"SENTRY_DSN\",\n default=\"https://[email protected]/6107216\",\n)\nSENTRY_SAMPLE_RATE = config(\"SENTRY_SAMPLE_RATE\", default=1.0, cast=float)\n\nif not DEBUG:\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration()],\n traces_sample_rate=SENTRY_SAMPLE_RATE,\n send_default_pii=False,\n environment=ENVIRONMENT,\n )\n", "path": "api/catalog/settings.py"}]} | 3,821 | 168 |
gh_patches_debug_11382 | rasdani/github-patches | git_diff | activeloopai__deeplake-683 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]
## 🐛🐛 Bug Report
### ⚗️ Current Behavior
Hub's version info is present in two locations, `setup.py` and `hub/version.py`. As a result, the released version displays the wrong version info (1.2.3 instead of 1.3.0) when users do `hub --version`.
### 🧰 Possible Solution (optional)
Remove version info from `setup.py`.
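For illustration only (not part of the original report, and not necessarily the fix the project shipped): one common way to keep a single source of truth is to have `setup.py` read the version from `hub/version.py`. The sketch below assumes that file defines `__version__`.
```
import os
from setuptools import setup

# Sketch: read the version from hub/version.py (assumed to define __version__)
# so that setup.py no longer hard-codes VERSION.
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, "hub", "version.py")) as f:
    exec(f.read(), about)

setup(
    name="hub",
    version=about["__version__"],
    # ... remaining arguments unchanged from the existing setup.py ...
)
```
Either way, the goal is that `hub --version` and the published package report the same number.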
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """
2 License:
3 This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
4 If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
5 """
6
7 import os
8
9 from setuptools import find_packages, setup
10
11 project = "hub"
12 VERSION = "1.3.0"
13
14 this_directory = os.path.abspath(os.path.dirname(__file__))
15 with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
16 long_description = f.read()
17
18 with open(os.path.join(this_directory, "requirements.txt")) as f:
19 requirements = f.readlines()
20
21 setup(
22 name=project,
23 version=VERSION,
24 description="Activeloop Hub",
25 long_description=long_description,
26 long_description_content_type="text/markdown",
27 author="Snark AI Inc.",
28 author_email="[email protected]",
29 license="MPL 2.0",
30 url="https://github.com/activeloopai/Hub",
31 packages=find_packages(),
32 include_package_data=True,
33 zip_safe=False,
34 keywords="snark-hub",
35 project_urls={
36 "Documentation": "https://docs.activeloop.ai/",
37 "Source": "https://github.com/activeloopai/Hub",
38 },
39 classifiers=[
40 "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
41 "Programming Language :: Python :: 3",
42 "Programming Language :: Python :: 3.6",
43 "Programming Language :: Python :: 3.7",
44 "Programming Language :: Python :: 3.8",
45 "Programming Language :: Python :: 3.9",
46 "Programming Language :: Python :: 3 :: Only",
47 ],
48 python_requires=">=3.6",
49 install_requires=requirements,
50 setup_requires=[],
51 dependency_links=[],
52 entry_points={
53 "console_scripts": [
54 "activeloop = hub.cli.command:cli",
55 "activeloop-local = hub.cli.local:cli",
56 "activeloop-dev = hub.cli.dev:cli",
57 "hub = hub.cli.command:cli",
58 ]
59 },
60 tests_require=["pytest", "mock>=1.0.1"],
61 )
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,6 @@
from setuptools import find_packages, setup
project = "hub"
-VERSION = "1.3.0"
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md")) as f:
@@ -20,7 +19,6 @@
setup(
name=project,
- version=VERSION,
description="Activeloop Hub",
long_description=long_description,
long_description_content_type="text/markdown",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,6 @@\n from setuptools import find_packages, setup\n \n project = \"hub\"\n-VERSION = \"1.3.0\"\n \n this_directory = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(this_directory, \"README.md\")) as f:\n@@ -20,7 +19,6 @@\n \n setup(\n name=project,\n- version=VERSION,\n description=\"Activeloop Hub\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n", "issue": "[BUG]\n## \ud83d\udc1b\ud83d\udc1b Bug Report\r\n\r\n\r\n### \u2697\ufe0f Current Behavior\r\nHub's version info is present in two locations, `setup.py` and `hub/version.py`. As result, the released version displays the wrong version info (1.2.3 instead of 1.3.0) when users do `hub --version`.\r\n\r\n### \ud83e\uddf0 Possible Solution (optional)\r\nRemove version info from `setup.py`.\n", "before_files": [{"content": "\"\"\"\nLicense:\nThis Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.\nIf a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\"\"\"\n\nimport os\n\nfrom setuptools import find_packages, setup\n\nproject = \"hub\"\nVERSION = \"1.3.0\"\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nwith open(os.path.join(this_directory, \"requirements.txt\")) as f:\n requirements = f.readlines()\n\nsetup(\n name=project,\n version=VERSION,\n description=\"Activeloop Hub\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Snark AI Inc.\",\n author_email=\"[email protected]\",\n license=\"MPL 2.0\",\n url=\"https://github.com/activeloopai/Hub\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n keywords=\"snark-hub\",\n project_urls={\n \"Documentation\": \"https://docs.activeloop.ai/\",\n \"Source\": \"https://github.com/activeloopai/Hub\",\n },\n classifiers=[\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n python_requires=\">=3.6\",\n install_requires=requirements,\n setup_requires=[],\n dependency_links=[],\n entry_points={\n \"console_scripts\": [\n \"activeloop = hub.cli.command:cli\",\n \"activeloop-local = hub.cli.local:cli\",\n \"activeloop-dev = hub.cli.dev:cli\",\n \"hub = hub.cli.command:cli\",\n ]\n },\n tests_require=[\"pytest\", \"mock>=1.0.1\"],\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\nLicense:\nThis Source Code Form is subject to the terms of the Mozilla Public License, v. 
2.0.\nIf a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\"\"\"\n\nimport os\n\nfrom setuptools import find_packages, setup\n\nproject = \"hub\"\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\nwith open(os.path.join(this_directory, \"requirements.txt\")) as f:\n requirements = f.readlines()\n\nsetup(\n name=project,\n description=\"Activeloop Hub\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Snark AI Inc.\",\n author_email=\"[email protected]\",\n license=\"MPL 2.0\",\n url=\"https://github.com/activeloopai/Hub\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n keywords=\"snark-hub\",\n project_urls={\n \"Documentation\": \"https://docs.activeloop.ai/\",\n \"Source\": \"https://github.com/activeloopai/Hub\",\n },\n classifiers=[\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n python_requires=\">=3.6\",\n install_requires=requirements,\n setup_requires=[],\n dependency_links=[],\n entry_points={\n \"console_scripts\": [\n \"activeloop = hub.cli.command:cli\",\n \"activeloop-local = hub.cli.local:cli\",\n \"activeloop-dev = hub.cli.dev:cli\",\n \"hub = hub.cli.command:cli\",\n ]\n },\n tests_require=[\"pytest\", \"mock>=1.0.1\"],\n)\n", "path": "setup.py"}]} | 950 | 132 |
gh_patches_debug_14878 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2814 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TX: No vote data for v2 API
State: TX
There is no vote data being returned for TX bills/resolutions for the 86 session in the v2 API. I am opening this issue because it is specific to the latest API, and previous issues were specific to the v1 API.
I would like to have a conversation with someone from open-states who can explain your use of the scrapers for retrieving vote data, since it appears you need some help to update/fix these. We have some people that can probably be mobilized to help with this from [Open Austin](https://www.open-austin.org/).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/tx/__init__.py`
Content:
```
1 from pupa.scrape import Jurisdiction, Organization
2
3 from openstates.utils import url_xpath
4 from .bills import TXBillScraper
5 # from .committees import TXCommitteeScraper
6 # from .events import TXEventScraper
7 from .people import TXPersonScraper
8 # from .votes import TXVoteScraper
9
10
11 class Texas(Jurisdiction):
12 division_id = "ocd-division/country:us/state:tx"
13 classification = "government"
14 name = "Texas"
15 url = "https://capitol.texas.gov/"
16 scrapers = {
17 'people': TXPersonScraper,
18 # 'committees': TXCommitteeScraper,
19 'bills': TXBillScraper,
20 # Re-enable vote scraper when adding next regular session
21 # 'votes': TXVoteScraper,
22 # 'events': TXEventScraper
23 }
24 legislative_sessions = [
25 {
26 "_scraped_name": "81(R) - 2009",
27 "classification": "primary",
28 "end_date": "2009-06-01",
29 "identifier": "81",
30 "name": "81st Legislature (2009)",
31 "start_date": "2009-01-13"
32 },
33 {
34 "_scraped_name": "81(1) - 2009",
35 "classification": "special",
36 "end_date": "2009-07-02",
37 "identifier": "811",
38 "name": "81st Legislature, 1st Called Session (2009)",
39 "start_date": "2009-07-01"
40 },
41 {
42 "_scraped_name": "82(R) - 2011",
43 "classification": "primary",
44 "end_date": "2011-05-30",
45 "identifier": "82",
46 "name": "82nd Legislature (2011)",
47 "start_date": "2011-01-11"
48 },
49 {
50 "_scraped_name": "82(1) - 2011",
51 "classification": "special",
52 "end_date": "2011-06-29",
53 "identifier": "821",
54 "name": "82nd Legislature, 1st Called Session (2011)",
55 "start_date": "2011-05-31"
56 },
57 {
58 "_scraped_name": "83(R) - 2013",
59 "classification": "primary",
60 "end_date": "2013-05-27",
61 "identifier": "83",
62 "name": "83rd Legislature (2013)",
63 "start_date": "2013-01-08"
64 },
65 {
66 "_scraped_name": "83(1) - 2013",
67 "classification": "special",
68 "end_date": "2013-06-25",
69 "identifier": "831",
70 "name": "83nd Legislature, 1st Called Session (2013)",
71 "start_date": "2013-05-27"
72 },
73 {
74 "_scraped_name": "83(2) - 2013",
75 "classification": "special",
76 "end_date": "2013-07-30",
77 "identifier": "832",
78 "name": "83nd Legislature, 2st Called Session (2013)",
79 "start_date": "2013-07-01"
80 },
81 {
82 "_scraped_name": "83(3) - 2013",
83 "classification": "special",
84 "end_date": "2013-08-05",
85 "identifier": "833",
86 "name": "83nd Legislature, 3rd Called Session (2013)",
87 "start_date": "2013-07-30"
88 },
89 {
90 "_scraped_name": "84(R) - 2015",
91 "classification": "primary",
92 "end_date": "2015-06-01",
93 "identifier": "84",
94 "name": "84th Legislature (2015)",
95 "start_date": "2015-01-13"
96 },
97 {
98 "_scraped_name": "85(R) - 2017",
99 "classification": "primary",
100 "end_date": "2017-06-01",
101 "identifier": "85",
102 "name": "85th Legislature (2017)",
103 "start_date": "2017-01-13"
104 },
105 {
106 "_scraped_name": "85(1) - 2017",
107 "classification": "special",
108 "end_date": "2017-08-19",
109 "identifier": "851",
110 "name": "85nd Legislature, 1st Called Session (2017)",
111 "start_date": "2017-07-10"
112 },
113 {
114 "_scraped_name": "86(R) - 2019",
115 "classification": "primary",
116 "end_date": "2019-05-27",
117 "identifier": "86",
118 "name": "86th Legislature (2019)",
119 "start_date": "2019-01-08"
120 },
121 # TODO: Re-enable vote scraper when adding next regular session
122 ]
123 ignored_scraped_sessions = [
124 "80(R) - 2007",
125 "79(3) - 2006",
126 "79(2) - 2005",
127 "79(1) - 2005",
128 "79(R) - 2005",
129 "78(4) - 2004",
130 "78(3) - 2003",
131 "78(2) - 2003",
132 "78(1) - 2003",
133 "78(R) - 2003",
134 "77(R) - 2001",
135 "76(R) - 1999",
136 "75(R) - 1997",
137 "74(R) - 1995",
138 "73(R) - 1993",
139 "72(4) - 1992",
140 "72(3) - 1992",
141 "72(2) - 1991",
142 "72(1) - 1991",
143 "72(R) - 1991",
144 "71(6) - 1990",
145 "71(5) - 1990",
146 "71(4) - 1990",
147 "71(3) - 1990",
148 "71(2) - 1989",
149 "71(1) - 1989",
150 "71(R) - 1989"
151 ]
152
153 def get_session_list(self):
154 return url_xpath('https://capitol.texas.gov/',
155 '//select[@name="cboLegSess"]/option/text()')
156
157 def get_organizations(self):
158 legislature_name = "Texas Legislature"
159
160 legislature = Organization(name=legislature_name,
161 classification="legislature")
162 upper = Organization('Senate', classification='upper',
163 parent_id=legislature._id)
164 lower = Organization('House', classification='lower',
165 parent_id=legislature._id)
166
167 yield Organization(name='Office of the Governor', classification='executive')
168 yield legislature
169 yield upper
170 yield lower
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/tx/__init__.py b/openstates/tx/__init__.py
--- a/openstates/tx/__init__.py
+++ b/openstates/tx/__init__.py
@@ -5,7 +5,7 @@
# from .committees import TXCommitteeScraper
# from .events import TXEventScraper
from .people import TXPersonScraper
-# from .votes import TXVoteScraper
+from .votes import TXVoteScraper
class Texas(Jurisdiction):
@@ -18,7 +18,7 @@
# 'committees': TXCommitteeScraper,
'bills': TXBillScraper,
# Re-enable vote scraper when adding next regular session
- # 'votes': TXVoteScraper,
+ 'votes': TXVoteScraper,
# 'events': TXEventScraper
}
legislative_sessions = [
| {"golden_diff": "diff --git a/openstates/tx/__init__.py b/openstates/tx/__init__.py\n--- a/openstates/tx/__init__.py\n+++ b/openstates/tx/__init__.py\n@@ -5,7 +5,7 @@\n # from .committees import TXCommitteeScraper\n # from .events import TXEventScraper\n from .people import TXPersonScraper\n-# from .votes import TXVoteScraper\n+from .votes import TXVoteScraper\n \n \n class Texas(Jurisdiction):\n@@ -18,7 +18,7 @@\n # 'committees': TXCommitteeScraper,\n 'bills': TXBillScraper,\n # Re-enable vote scraper when adding next regular session\n- # 'votes': TXVoteScraper,\n+ 'votes': TXVoteScraper,\n # 'events': TXEventScraper\n }\n legislative_sessions = [\n", "issue": "TX: No vote data for v2 API\nState: TX\r\n\r\nThere is no vote data being returned for TX bills/resolutions for the 86 session in the v2 API. I am opening this issue because it is specific to the latest API, and previous issues were specific to the v1 API.\r\n\r\nI would like to have a conversation with someone from open-states who can explain your use of the scrapers for retrieving vote data, since it appears you need some help to update/fix these. We have some people that can probably be mobilized to help with this from [Open Austin](https://www.open-austin.org/]\r\n\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\nfrom .bills import TXBillScraper\n# from .committees import TXCommitteeScraper\n# from .events import TXEventScraper\nfrom .people import TXPersonScraper\n# from .votes import TXVoteScraper\n\n\nclass Texas(Jurisdiction):\n division_id = \"ocd-division/country:us/state:tx\"\n classification = \"government\"\n name = \"Texas\"\n url = \"https://capitol.texas.gov/\"\n scrapers = {\n 'people': TXPersonScraper,\n # 'committees': TXCommitteeScraper,\n 'bills': TXBillScraper,\n # Re-enable vote scraper when adding next regular session\n # 'votes': TXVoteScraper,\n # 'events': TXEventScraper\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"81(R) - 2009\",\n \"classification\": \"primary\",\n \"end_date\": \"2009-06-01\",\n \"identifier\": \"81\",\n \"name\": \"81st Legislature (2009)\",\n \"start_date\": \"2009-01-13\"\n },\n {\n \"_scraped_name\": \"81(1) - 2009\",\n \"classification\": \"special\",\n \"end_date\": \"2009-07-02\",\n \"identifier\": \"811\",\n \"name\": \"81st Legislature, 1st Called Session (2009)\",\n \"start_date\": \"2009-07-01\"\n },\n {\n \"_scraped_name\": \"82(R) - 2011\",\n \"classification\": \"primary\",\n \"end_date\": \"2011-05-30\",\n \"identifier\": \"82\",\n \"name\": \"82nd Legislature (2011)\",\n \"start_date\": \"2011-01-11\"\n },\n {\n \"_scraped_name\": \"82(1) - 2011\",\n \"classification\": \"special\",\n \"end_date\": \"2011-06-29\",\n \"identifier\": \"821\",\n \"name\": \"82nd Legislature, 1st Called Session (2011)\",\n \"start_date\": \"2011-05-31\"\n },\n {\n \"_scraped_name\": \"83(R) - 2013\",\n \"classification\": \"primary\",\n \"end_date\": \"2013-05-27\",\n \"identifier\": \"83\",\n \"name\": \"83rd Legislature (2013)\",\n \"start_date\": \"2013-01-08\"\n },\n {\n \"_scraped_name\": \"83(1) - 2013\",\n \"classification\": \"special\",\n \"end_date\": \"2013-06-25\",\n \"identifier\": \"831\",\n \"name\": \"83nd Legislature, 1st Called Session (2013)\",\n \"start_date\": \"2013-05-27\"\n },\n {\n \"_scraped_name\": \"83(2) - 2013\",\n \"classification\": \"special\",\n \"end_date\": \"2013-07-30\",\n \"identifier\": \"832\",\n \"name\": \"83nd Legislature, 2st Called Session (2013)\",\n 
\"start_date\": \"2013-07-01\"\n },\n {\n \"_scraped_name\": \"83(3) - 2013\",\n \"classification\": \"special\",\n \"end_date\": \"2013-08-05\",\n \"identifier\": \"833\",\n \"name\": \"83nd Legislature, 3rd Called Session (2013)\",\n \"start_date\": \"2013-07-30\"\n },\n {\n \"_scraped_name\": \"84(R) - 2015\",\n \"classification\": \"primary\",\n \"end_date\": \"2015-06-01\",\n \"identifier\": \"84\",\n \"name\": \"84th Legislature (2015)\",\n \"start_date\": \"2015-01-13\"\n },\n {\n \"_scraped_name\": \"85(R) - 2017\",\n \"classification\": \"primary\",\n \"end_date\": \"2017-06-01\",\n \"identifier\": \"85\",\n \"name\": \"85th Legislature (2017)\",\n \"start_date\": \"2017-01-13\"\n },\n {\n \"_scraped_name\": \"85(1) - 2017\",\n \"classification\": \"special\",\n \"end_date\": \"2017-08-19\",\n \"identifier\": \"851\",\n \"name\": \"85nd Legislature, 1st Called Session (2017)\",\n \"start_date\": \"2017-07-10\"\n },\n {\n \"_scraped_name\": \"86(R) - 2019\",\n \"classification\": \"primary\",\n \"end_date\": \"2019-05-27\",\n \"identifier\": \"86\",\n \"name\": \"86th Legislature (2019)\",\n \"start_date\": \"2019-01-08\"\n },\n # TODO: Re-enable vote scraper when adding next regular session\n ]\n ignored_scraped_sessions = [\n \"80(R) - 2007\",\n \"79(3) - 2006\",\n \"79(2) - 2005\",\n \"79(1) - 2005\",\n \"79(R) - 2005\",\n \"78(4) - 2004\",\n \"78(3) - 2003\",\n \"78(2) - 2003\",\n \"78(1) - 2003\",\n \"78(R) - 2003\",\n \"77(R) - 2001\",\n \"76(R) - 1999\",\n \"75(R) - 1997\",\n \"74(R) - 1995\",\n \"73(R) - 1993\",\n \"72(4) - 1992\",\n \"72(3) - 1992\",\n \"72(2) - 1991\",\n \"72(1) - 1991\",\n \"72(R) - 1991\",\n \"71(6) - 1990\",\n \"71(5) - 1990\",\n \"71(4) - 1990\",\n \"71(3) - 1990\",\n \"71(2) - 1989\",\n \"71(1) - 1989\",\n \"71(R) - 1989\"\n ]\n\n def get_session_list(self):\n return url_xpath('https://capitol.texas.gov/',\n '//select[@name=\"cboLegSess\"]/option/text()')\n\n def get_organizations(self):\n legislature_name = \"Texas Legislature\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization('Senate', classification='upper',\n parent_id=legislature._id)\n lower = Organization('House', classification='lower',\n parent_id=legislature._id)\n\n yield Organization(name='Office of the Governor', classification='executive')\n yield legislature\n yield upper\n yield lower\n", "path": "openstates/tx/__init__.py"}], "after_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\nfrom .bills import TXBillScraper\n# from .committees import TXCommitteeScraper\n# from .events import TXEventScraper\nfrom .people import TXPersonScraper\nfrom .votes import TXVoteScraper\n\n\nclass Texas(Jurisdiction):\n division_id = \"ocd-division/country:us/state:tx\"\n classification = \"government\"\n name = \"Texas\"\n url = \"https://capitol.texas.gov/\"\n scrapers = {\n 'people': TXPersonScraper,\n # 'committees': TXCommitteeScraper,\n 'bills': TXBillScraper,\n # Re-enable vote scraper when adding next regular session\n 'votes': TXVoteScraper,\n # 'events': TXEventScraper\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"81(R) - 2009\",\n \"classification\": \"primary\",\n \"end_date\": \"2009-06-01\",\n \"identifier\": \"81\",\n \"name\": \"81st Legislature (2009)\",\n \"start_date\": \"2009-01-13\"\n },\n {\n \"_scraped_name\": \"81(1) - 2009\",\n \"classification\": \"special\",\n \"end_date\": \"2009-07-02\",\n \"identifier\": \"811\",\n \"name\": \"81st 
Legislature, 1st Called Session (2009)\",\n \"start_date\": \"2009-07-01\"\n },\n {\n \"_scraped_name\": \"82(R) - 2011\",\n \"classification\": \"primary\",\n \"end_date\": \"2011-05-30\",\n \"identifier\": \"82\",\n \"name\": \"82nd Legislature (2011)\",\n \"start_date\": \"2011-01-11\"\n },\n {\n \"_scraped_name\": \"82(1) - 2011\",\n \"classification\": \"special\",\n \"end_date\": \"2011-06-29\",\n \"identifier\": \"821\",\n \"name\": \"82nd Legislature, 1st Called Session (2011)\",\n \"start_date\": \"2011-05-31\"\n },\n {\n \"_scraped_name\": \"83(R) - 2013\",\n \"classification\": \"primary\",\n \"end_date\": \"2013-05-27\",\n \"identifier\": \"83\",\n \"name\": \"83rd Legislature (2013)\",\n \"start_date\": \"2013-01-08\"\n },\n {\n \"_scraped_name\": \"83(1) - 2013\",\n \"classification\": \"special\",\n \"end_date\": \"2013-06-25\",\n \"identifier\": \"831\",\n \"name\": \"83nd Legislature, 1st Called Session (2013)\",\n \"start_date\": \"2013-05-27\"\n },\n {\n \"_scraped_name\": \"83(2) - 2013\",\n \"classification\": \"special\",\n \"end_date\": \"2013-07-30\",\n \"identifier\": \"832\",\n \"name\": \"83nd Legislature, 2st Called Session (2013)\",\n \"start_date\": \"2013-07-01\"\n },\n {\n \"_scraped_name\": \"83(3) - 2013\",\n \"classification\": \"special\",\n \"end_date\": \"2013-08-05\",\n \"identifier\": \"833\",\n \"name\": \"83nd Legislature, 3rd Called Session (2013)\",\n \"start_date\": \"2013-07-30\"\n },\n {\n \"_scraped_name\": \"84(R) - 2015\",\n \"classification\": \"primary\",\n \"end_date\": \"2015-06-01\",\n \"identifier\": \"84\",\n \"name\": \"84th Legislature (2015)\",\n \"start_date\": \"2015-01-13\"\n },\n {\n \"_scraped_name\": \"85(R) - 2017\",\n \"classification\": \"primary\",\n \"end_date\": \"2017-06-01\",\n \"identifier\": \"85\",\n \"name\": \"85th Legislature (2017)\",\n \"start_date\": \"2017-01-13\"\n },\n {\n \"_scraped_name\": \"85(1) - 2017\",\n \"classification\": \"special\",\n \"end_date\": \"2017-08-19\",\n \"identifier\": \"851\",\n \"name\": \"85nd Legislature, 1st Called Session (2017)\",\n \"start_date\": \"2017-07-10\"\n },\n {\n \"_scraped_name\": \"86(R) - 2019\",\n \"classification\": \"primary\",\n \"end_date\": \"2019-05-27\",\n \"identifier\": \"86\",\n \"name\": \"86th Legislature (2019)\",\n \"start_date\": \"2019-01-08\"\n },\n # TODO: Re-enable vote scraper when adding next regular session\n ]\n ignored_scraped_sessions = [\n \"80(R) - 2007\",\n \"79(3) - 2006\",\n \"79(2) - 2005\",\n \"79(1) - 2005\",\n \"79(R) - 2005\",\n \"78(4) - 2004\",\n \"78(3) - 2003\",\n \"78(2) - 2003\",\n \"78(1) - 2003\",\n \"78(R) - 2003\",\n \"77(R) - 2001\",\n \"76(R) - 1999\",\n \"75(R) - 1997\",\n \"74(R) - 1995\",\n \"73(R) - 1993\",\n \"72(4) - 1992\",\n \"72(3) - 1992\",\n \"72(2) - 1991\",\n \"72(1) - 1991\",\n \"72(R) - 1991\",\n \"71(6) - 1990\",\n \"71(5) - 1990\",\n \"71(4) - 1990\",\n \"71(3) - 1990\",\n \"71(2) - 1989\",\n \"71(1) - 1989\",\n \"71(R) - 1989\"\n ]\n\n def get_session_list(self):\n return url_xpath('https://capitol.texas.gov/',\n '//select[@name=\"cboLegSess\"]/option/text()')\n\n def get_organizations(self):\n legislature_name = \"Texas Legislature\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization('Senate', classification='upper',\n parent_id=legislature._id)\n lower = Organization('House', classification='lower',\n parent_id=legislature._id)\n\n yield Organization(name='Office of the Governor', classification='executive')\n yield legislature\n yield 
upper\n yield lower\n", "path": "openstates/tx/__init__.py"}]} | 2,623 | 197 |
gh_patches_debug_15407 | rasdani/github-patches | git_diff | nipy__nipype-3168 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sgegraph only submits the next processing node
### Summary
the `SGEGraph` plugin isn't submitting the entire workflow, only the next required step.
### Actual behavior
When calling `workflow.run(plugin='SGEGraph', plugin_args = {'dont_resubmit_completed_jobs': True})`, only the next processing node is submitted. Subsequent calls are needed to iteratively process subsequent nodes.
`workflow.run(plugin='SGE')` works as expected, submitting one job, waiting until completion, then submitting the next.
### Expected behavior
I would expect that `workflow.run(plugin='SGEGraph', plugin_args = {'dont_resubmit_completed_jobs': True})` would submit all processing jobs at once with later jobs being held until the earlier required jobs are finished.
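For illustration (hypothetical node names, not output from a real run): with graph-based submission working as expected, `submit_jobs.sh` would contain one `qsub` line per node up front, with each downstream job held on its parents via `-hold_jid`, roughly in the shape of the `full_line` template in `sgegraph.py`:
```
# Sketch (invented node names): the shape of the lines _submit_graph() is
# expected to write for a two-node workflow -- every node submitted up front
# and the child held on its parent via -hold_jid (-e/-o options omitted).
parent = "j0_node_a=$(qsub -N j0_node_a batchscript_node_a.sh | awk '{print $3}')"
child = (
    "j1_node_b=$(qsub -hold_jid ${j0_node_a} "
    "-N j1_node_b batchscript_node_b.sh | awk '{print $3}')"
)
print(parent)
print(child)
```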
### How to replicate the behavior
I'm new to nipype, so I'm not sure how to get sample/example data to provide a minimal example, but I think that any of the workflows in the example folder would demonstrate this issue.
### Platform details:
<!-- Please run the following code from your shell and place the output between the triple ticks, below.
python -c "import nipype; from pprint import pprint; pprint(nipype.get_info())"
-->
```
200207-13:43:08,275 nipype.utils INFO:
No new version available.
{'commit_hash': '792988f19',
'commit_source': 'installation',
'networkx_version': '2.4',
'nibabel_version': '3.0.1',
'nipype_version': '1.4.1',
'numpy_version': '1.18.1',
'pkg_path': '/mnt/isilon/meg_lab/Linux_software/anaconda3_lab/envs/nipype/lib/python3.8/site-packages/nipype',
'scipy_version': '1.3.2',
'sys_executable': '/mnt/isilon/meg_lab/Linux_software/anaconda3_lab/envs/nipype/bin/python',
'sys_platform': 'linux',
'sys_version': '3.8.1 (default, Jan 8 2020, 22:29:32) \n[GCC 7.3.0]',
'traits_version': '5.2.0'}
```
### Execution environment
Choose one
- My python environment outside container
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nipype/pipeline/plugins/sgegraph.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Parallel workflow execution via SGE
3 """
4 import os
5 import sys
6
7 from ...interfaces.base import CommandLine
8 from .base import GraphPluginBase, logger
9
10
11 def node_completed_status(checknode):
12 """
13 A function to determine if a node has previously completed it's work
14 :param checknode: The node to check the run status
15 :return: boolean value True indicates that the node does not need to be run.
16 """
17 """ TODO: place this in the base.py file and refactor """
18 node_state_does_not_require_overwrite = checknode.overwrite is False or (
19 checknode.overwrite is None and not checknode._interface.always_run
20 )
21 hash_exists = False
22 try:
23 hash_exists, _, _, _ = checknode.hash_exists()
24 except Exception:
25 hash_exists = False
26 return hash_exists and node_state_does_not_require_overwrite
27
28
29 class SGEGraphPlugin(GraphPluginBase):
30 """Execute using SGE
31
32 The plugin_args input to run can be used to control the SGE execution.
33 Currently supported options are:
34
35 - template : template to use for batch job submission
36 - qsub_args : arguments to be prepended to the job execution script in the
37 qsub call
38
39 """
40
41 _template = """
42 #!/bin/bash
43 #$ -V
44 #$ -S /bin/bash
45 """
46
47 def __init__(self, **kwargs):
48 self._qsub_args = ""
49 self._dont_resubmit_completed_jobs = False
50 if "plugin_args" in kwargs and kwargs["plugin_args"]:
51 plugin_args = kwargs["plugin_args"]
52 if "template" in plugin_args:
53 self._template = plugin_args["template"]
54 if os.path.isfile(self._template):
55 self._template = open(self._template).read()
56 if "qsub_args" in plugin_args:
57 self._qsub_args = plugin_args["qsub_args"]
58 if "dont_resubmit_completed_jobs" in plugin_args:
59 self._dont_resubmit_completed_jobs = plugin_args[
60 "dont_resubmit_completed_jobs"
61 ]
62 super(SGEGraphPlugin, self).__init__(**kwargs)
63
64 def _submit_graph(self, pyfiles, dependencies, nodes):
65 def make_job_name(jobnumber, nodeslist):
66 """
67 - jobnumber: The index number of the job to create
68 - nodeslist: The name of the node being processed
69 - return: A string representing this job to be displayed by SGE
70 """
71 job_name = "j{0}_{1}".format(jobnumber, nodeslist[jobnumber]._id)
72 # Condition job_name to be a valid bash identifier (i.e. - is invalid)
73 job_name = job_name.replace("-", "_").replace(".", "_").replace(":", "_")
74 return job_name
75
76 batch_dir, _ = os.path.split(pyfiles[0])
77 submitjobsfile = os.path.join(batch_dir, "submit_jobs.sh")
78
79 cache_doneness_per_node = dict()
80 if (
81 self._dont_resubmit_completed_jobs
82 ): # A future parameter for controlling this behavior could be added here
83 for idx, pyscript in enumerate(pyfiles):
84 node = nodes[idx]
85 node_status_done = node_completed_status(node)
86
87 # if the node itself claims done, then check to ensure all
88 # dependancies are also done
89 if node_status_done and idx in dependencies:
90 for child_idx in dependencies[idx]:
91 if child_idx in cache_doneness_per_node:
92 child_status_done = cache_doneness_per_node[child_idx]
93 else:
94 child_status_done = node_completed_status(nodes[child_idx])
95 node_status_done = node_status_done and child_status_done
96
97 cache_doneness_per_node[idx] = node_status_done
98
99 with open(submitjobsfile, "wt") as fp:
100 fp.writelines("#!/usr/bin/env bash\n")
101 fp.writelines("# Condense format attempted\n")
102 for idx, pyscript in enumerate(pyfiles):
103 node = nodes[idx]
104 if cache_doneness_per_node.get(idx, False):
105 continue
106 else:
107 template, qsub_args = self._get_args(
108 node, ["template", "qsub_args"]
109 )
110
111 batch_dir, name = os.path.split(pyscript)
112 name = ".".join(name.split(".")[:-1])
113 batchscript = "\n".join(
114 (template, "%s %s" % (sys.executable, pyscript))
115 )
116 batchscriptfile = os.path.join(
117 batch_dir, "batchscript_%s.sh" % name
118 )
119
120 batchscriptoutfile = batchscriptfile + ".o"
121 batchscripterrfile = batchscriptfile + ".e"
122
123 with open(batchscriptfile, "wt") as batchfp:
124 batchfp.writelines(batchscript)
125 batchfp.close()
126 deps = ""
127 if idx in dependencies:
128 values = " "
129 for jobid in dependencies[idx]:
130 # Avoid dependancies of done jobs
131 if (
132 not self._dont_resubmit_completed_jobs
133 or not cache_doneness_per_node[jobid]
134 ):
135 values += "${{{0}}},".format(
136 make_job_name(jobid, nodes)
137 )
138 if (
139 values != " "
140 ): # i.e. if some jobs were added to dependency list
141 values = values.rstrip(",")
142 deps = "-hold_jid%s" % values
143 jobname = make_job_name(idx, nodes)
144 # Do not use default output locations if they are set in self._qsub_args
145 stderrFile = ""
146 if self._qsub_args.count("-e ") == 0:
147 stderrFile = "-e {errFile}".format(errFile=batchscripterrfile)
148 stdoutFile = ""
149 if self._qsub_args.count("-o ") == 0:
150 stdoutFile = "-o {outFile}".format(outFile=batchscriptoutfile)
151 full_line = "{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '{{print $3}}')\n".format(
152 jobNm=jobname,
153 outFileOption=stdoutFile,
154 errFileOption=stderrFile,
155 extraQSubArgs=qsub_args,
156 dependantIndex=deps,
157 batchscript=batchscriptfile,
158 )
159 fp.writelines(full_line)
160 cmd = CommandLine(
161 "bash",
162 environ=dict(os.environ),
163 resource_monitor=False,
164 terminal_output="allatonce",
165 )
166 cmd.inputs.args = "%s" % submitjobsfile
167 cmd.run()
168 logger.info("submitted all jobs to queue")
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nipype/pipeline/plugins/sgegraph.py b/nipype/pipeline/plugins/sgegraph.py
--- a/nipype/pipeline/plugins/sgegraph.py
+++ b/nipype/pipeline/plugins/sgegraph.py
@@ -148,7 +148,7 @@
stdoutFile = ""
if self._qsub_args.count("-o ") == 0:
stdoutFile = "-o {outFile}".format(outFile=batchscriptoutfile)
- full_line = "{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '{{print $3}}')\n".format(
+ full_line = "{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '/^Your job/{{print $3}}')\n".format(
jobNm=jobname,
outFileOption=stdoutFile,
errFileOption=stderrFile,
| {"golden_diff": "diff --git a/nipype/pipeline/plugins/sgegraph.py b/nipype/pipeline/plugins/sgegraph.py\n--- a/nipype/pipeline/plugins/sgegraph.py\n+++ b/nipype/pipeline/plugins/sgegraph.py\n@@ -148,7 +148,7 @@\n stdoutFile = \"\"\n if self._qsub_args.count(\"-o \") == 0:\n stdoutFile = \"-o {outFile}\".format(outFile=batchscriptoutfile)\n- full_line = \"{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '{{print $3}}')\\n\".format(\n+ full_line = \"{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '/^Your job/{{print $3}}')\\n\".format(\n jobNm=jobname,\n outFileOption=stdoutFile,\n errFileOption=stderrFile,\n", "issue": "sgegraph only submits the next processing node\n### Summary\r\nthe `SGEGraph` plugin isn't submitting the entire workflow only the next required step.\r\n\r\n### Actual behavior\r\n\r\nWhen calling `workflow.run(plugin='SGEGraph', plugin_args = {'dont_resubmit_completed_jobs': True})` only the next processing node is submitted. subsequent calls are needed to iteratively processes subsequent nodes.\r\n\r\n`workflow.run(plugin='SGE)` works as expected. submitting one job waiting until completion then submitting the next.\r\n\r\n### Expected behavior\r\nI would expect that `workflow.run(plugin='SGEGraph', plugin_args = {'dont_resubmit_completed_jobs': True})` would submit all processing jobs at once with later jobs being held until the earlier required jobs are finished.\r\n\r\n### How to replicate the behavior\r\nI'm new to nipype so I'm not sure how to get sample/example data to provide a minimal example but I think that any of the workflows in the example folder would demonstrate this issue.\r\n\r\n### Platform details:\r\n\r\n<!-- Please run the following code from your shell and place the output between the triple ticks, below.\r\npython -c \"import nipype; from pprint import pprint; pprint(nipype.get_info())\"\r\n-->\r\n\r\n```\r\n200207-13:43:08,275 nipype.utils INFO:\r\n\t No new version available.\r\n{'commit_hash': '792988f19',\r\n 'commit_source': 'installation',\r\n 'networkx_version': '2.4',\r\n 'nibabel_version': '3.0.1',\r\n 'nipype_version': '1.4.1',\r\n 'numpy_version': '1.18.1',\r\n 'pkg_path': '/mnt/isilon/meg_lab/Linux_software/anaconda3_lab/envs/nipype/lib/python3.8/site-packages/nipype',\r\n 'scipy_version': '1.3.2',\r\n 'sys_executable': '/mnt/isilon/meg_lab/Linux_software/anaconda3_lab/envs/nipype/bin/python',\r\n 'sys_platform': 'linux',\r\n 'sys_version': '3.8.1 (default, Jan 8 2020, 22:29:32) \\n[GCC 7.3.0]',\r\n 'traits_version': '5.2.0'}\r\n```\r\n\r\n### Execution environment\r\n\r\nChoose one\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Parallel workflow execution via SGE\n\"\"\"\nimport os\nimport sys\n\nfrom ...interfaces.base import CommandLine\nfrom .base import GraphPluginBase, logger\n\n\ndef node_completed_status(checknode):\n \"\"\"\n A function to determine if a node has previously completed it's work\n :param checknode: The node to check the run status\n :return: boolean value True indicates that the node does not need to be run.\n \"\"\"\n \"\"\" TODO: place this in the base.py file and refactor \"\"\"\n node_state_does_not_require_overwrite = checknode.overwrite is False or (\n checknode.overwrite is None and not checknode._interface.always_run\n )\n hash_exists = False\n try:\n hash_exists, _, _, _ = checknode.hash_exists()\n except 
Exception:\n hash_exists = False\n return hash_exists and node_state_does_not_require_overwrite\n\n\nclass SGEGraphPlugin(GraphPluginBase):\n \"\"\"Execute using SGE\n\n The plugin_args input to run can be used to control the SGE execution.\n Currently supported options are:\n\n - template : template to use for batch job submission\n - qsub_args : arguments to be prepended to the job execution script in the\n qsub call\n\n \"\"\"\n\n _template = \"\"\"\n#!/bin/bash\n#$ -V\n#$ -S /bin/bash\n\"\"\"\n\n def __init__(self, **kwargs):\n self._qsub_args = \"\"\n self._dont_resubmit_completed_jobs = False\n if \"plugin_args\" in kwargs and kwargs[\"plugin_args\"]:\n plugin_args = kwargs[\"plugin_args\"]\n if \"template\" in plugin_args:\n self._template = plugin_args[\"template\"]\n if os.path.isfile(self._template):\n self._template = open(self._template).read()\n if \"qsub_args\" in plugin_args:\n self._qsub_args = plugin_args[\"qsub_args\"]\n if \"dont_resubmit_completed_jobs\" in plugin_args:\n self._dont_resubmit_completed_jobs = plugin_args[\n \"dont_resubmit_completed_jobs\"\n ]\n super(SGEGraphPlugin, self).__init__(**kwargs)\n\n def _submit_graph(self, pyfiles, dependencies, nodes):\n def make_job_name(jobnumber, nodeslist):\n \"\"\"\n - jobnumber: The index number of the job to create\n - nodeslist: The name of the node being processed\n - return: A string representing this job to be displayed by SGE\n \"\"\"\n job_name = \"j{0}_{1}\".format(jobnumber, nodeslist[jobnumber]._id)\n # Condition job_name to be a valid bash identifier (i.e. - is invalid)\n job_name = job_name.replace(\"-\", \"_\").replace(\".\", \"_\").replace(\":\", \"_\")\n return job_name\n\n batch_dir, _ = os.path.split(pyfiles[0])\n submitjobsfile = os.path.join(batch_dir, \"submit_jobs.sh\")\n\n cache_doneness_per_node = dict()\n if (\n self._dont_resubmit_completed_jobs\n ): # A future parameter for controlling this behavior could be added here\n for idx, pyscript in enumerate(pyfiles):\n node = nodes[idx]\n node_status_done = node_completed_status(node)\n\n # if the node itself claims done, then check to ensure all\n # dependancies are also done\n if node_status_done and idx in dependencies:\n for child_idx in dependencies[idx]:\n if child_idx in cache_doneness_per_node:\n child_status_done = cache_doneness_per_node[child_idx]\n else:\n child_status_done = node_completed_status(nodes[child_idx])\n node_status_done = node_status_done and child_status_done\n\n cache_doneness_per_node[idx] = node_status_done\n\n with open(submitjobsfile, \"wt\") as fp:\n fp.writelines(\"#!/usr/bin/env bash\\n\")\n fp.writelines(\"# Condense format attempted\\n\")\n for idx, pyscript in enumerate(pyfiles):\n node = nodes[idx]\n if cache_doneness_per_node.get(idx, False):\n continue\n else:\n template, qsub_args = self._get_args(\n node, [\"template\", \"qsub_args\"]\n )\n\n batch_dir, name = os.path.split(pyscript)\n name = \".\".join(name.split(\".\")[:-1])\n batchscript = \"\\n\".join(\n (template, \"%s %s\" % (sys.executable, pyscript))\n )\n batchscriptfile = os.path.join(\n batch_dir, \"batchscript_%s.sh\" % name\n )\n\n batchscriptoutfile = batchscriptfile + \".o\"\n batchscripterrfile = batchscriptfile + \".e\"\n\n with open(batchscriptfile, \"wt\") as batchfp:\n batchfp.writelines(batchscript)\n batchfp.close()\n deps = \"\"\n if idx in dependencies:\n values = \" \"\n for jobid in dependencies[idx]:\n # Avoid dependancies of done jobs\n if (\n not self._dont_resubmit_completed_jobs\n or not cache_doneness_per_node[jobid]\n ):\n 
values += \"${{{0}}},\".format(\n make_job_name(jobid, nodes)\n )\n if (\n values != \" \"\n ): # i.e. if some jobs were added to dependency list\n values = values.rstrip(\",\")\n deps = \"-hold_jid%s\" % values\n jobname = make_job_name(idx, nodes)\n # Do not use default output locations if they are set in self._qsub_args\n stderrFile = \"\"\n if self._qsub_args.count(\"-e \") == 0:\n stderrFile = \"-e {errFile}\".format(errFile=batchscripterrfile)\n stdoutFile = \"\"\n if self._qsub_args.count(\"-o \") == 0:\n stdoutFile = \"-o {outFile}\".format(outFile=batchscriptoutfile)\n full_line = \"{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '{{print $3}}')\\n\".format(\n jobNm=jobname,\n outFileOption=stdoutFile,\n errFileOption=stderrFile,\n extraQSubArgs=qsub_args,\n dependantIndex=deps,\n batchscript=batchscriptfile,\n )\n fp.writelines(full_line)\n cmd = CommandLine(\n \"bash\",\n environ=dict(os.environ),\n resource_monitor=False,\n terminal_output=\"allatonce\",\n )\n cmd.inputs.args = \"%s\" % submitjobsfile\n cmd.run()\n logger.info(\"submitted all jobs to queue\")\n", "path": "nipype/pipeline/plugins/sgegraph.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Parallel workflow execution via SGE\n\"\"\"\nimport os\nimport sys\n\nfrom ...interfaces.base import CommandLine\nfrom .base import GraphPluginBase, logger\n\n\ndef node_completed_status(checknode):\n \"\"\"\n A function to determine if a node has previously completed it's work\n :param checknode: The node to check the run status\n :return: boolean value True indicates that the node does not need to be run.\n \"\"\"\n \"\"\" TODO: place this in the base.py file and refactor \"\"\"\n node_state_does_not_require_overwrite = checknode.overwrite is False or (\n checknode.overwrite is None and not checknode._interface.always_run\n )\n hash_exists = False\n try:\n hash_exists, _, _, _ = checknode.hash_exists()\n except Exception:\n hash_exists = False\n return hash_exists and node_state_does_not_require_overwrite\n\n\nclass SGEGraphPlugin(GraphPluginBase):\n \"\"\"Execute using SGE\n\n The plugin_args input to run can be used to control the SGE execution.\n Currently supported options are:\n\n - template : template to use for batch job submission\n - qsub_args : arguments to be prepended to the job execution script in the\n qsub call\n\n \"\"\"\n\n _template = \"\"\"\n#!/bin/bash\n#$ -V\n#$ -S /bin/bash\n\"\"\"\n\n def __init__(self, **kwargs):\n self._qsub_args = \"\"\n self._dont_resubmit_completed_jobs = False\n if \"plugin_args\" in kwargs and kwargs[\"plugin_args\"]:\n plugin_args = kwargs[\"plugin_args\"]\n if \"template\" in plugin_args:\n self._template = plugin_args[\"template\"]\n if os.path.isfile(self._template):\n self._template = open(self._template).read()\n if \"qsub_args\" in plugin_args:\n self._qsub_args = plugin_args[\"qsub_args\"]\n if \"dont_resubmit_completed_jobs\" in plugin_args:\n self._dont_resubmit_completed_jobs = plugin_args[\n \"dont_resubmit_completed_jobs\"\n ]\n super(SGEGraphPlugin, self).__init__(**kwargs)\n\n def _submit_graph(self, pyfiles, dependencies, nodes):\n def make_job_name(jobnumber, nodeslist):\n \"\"\"\n - jobnumber: The index number of the job to create\n - nodeslist: The name of the node being processed\n - return: A string representing this job to be displayed by SGE\n \"\"\"\n job_name = \"j{0}_{1}\".format(jobnumber, nodeslist[jobnumber]._id)\n # Condition job_name to be a valid bash identifier (i.e. 
- is invalid)\n job_name = job_name.replace(\"-\", \"_\").replace(\".\", \"_\").replace(\":\", \"_\")\n return job_name\n\n batch_dir, _ = os.path.split(pyfiles[0])\n submitjobsfile = os.path.join(batch_dir, \"submit_jobs.sh\")\n\n cache_doneness_per_node = dict()\n if (\n self._dont_resubmit_completed_jobs\n ): # A future parameter for controlling this behavior could be added here\n for idx, pyscript in enumerate(pyfiles):\n node = nodes[idx]\n node_status_done = node_completed_status(node)\n\n # if the node itself claims done, then check to ensure all\n # dependancies are also done\n if node_status_done and idx in dependencies:\n for child_idx in dependencies[idx]:\n if child_idx in cache_doneness_per_node:\n child_status_done = cache_doneness_per_node[child_idx]\n else:\n child_status_done = node_completed_status(nodes[child_idx])\n node_status_done = node_status_done and child_status_done\n\n cache_doneness_per_node[idx] = node_status_done\n\n with open(submitjobsfile, \"wt\") as fp:\n fp.writelines(\"#!/usr/bin/env bash\\n\")\n fp.writelines(\"# Condense format attempted\\n\")\n for idx, pyscript in enumerate(pyfiles):\n node = nodes[idx]\n if cache_doneness_per_node.get(idx, False):\n continue\n else:\n template, qsub_args = self._get_args(\n node, [\"template\", \"qsub_args\"]\n )\n\n batch_dir, name = os.path.split(pyscript)\n name = \".\".join(name.split(\".\")[:-1])\n batchscript = \"\\n\".join(\n (template, \"%s %s\" % (sys.executable, pyscript))\n )\n batchscriptfile = os.path.join(\n batch_dir, \"batchscript_%s.sh\" % name\n )\n\n batchscriptoutfile = batchscriptfile + \".o\"\n batchscripterrfile = batchscriptfile + \".e\"\n\n with open(batchscriptfile, \"wt\") as batchfp:\n batchfp.writelines(batchscript)\n batchfp.close()\n deps = \"\"\n if idx in dependencies:\n values = \" \"\n for jobid in dependencies[idx]:\n # Avoid dependancies of done jobs\n if (\n not self._dont_resubmit_completed_jobs\n or not cache_doneness_per_node[jobid]\n ):\n values += \"${{{0}}},\".format(\n make_job_name(jobid, nodes)\n )\n if (\n values != \" \"\n ): # i.e. if some jobs were added to dependency list\n values = values.rstrip(\",\")\n deps = \"-hold_jid%s\" % values\n jobname = make_job_name(idx, nodes)\n # Do not use default output locations if they are set in self._qsub_args\n stderrFile = \"\"\n if self._qsub_args.count(\"-e \") == 0:\n stderrFile = \"-e {errFile}\".format(errFile=batchscripterrfile)\n stdoutFile = \"\"\n if self._qsub_args.count(\"-o \") == 0:\n stdoutFile = \"-o {outFile}\".format(outFile=batchscriptoutfile)\n full_line = \"{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk '/^Your job/{{print $3}}')\\n\".format(\n jobNm=jobname,\n outFileOption=stdoutFile,\n errFileOption=stderrFile,\n extraQSubArgs=qsub_args,\n dependantIndex=deps,\n batchscript=batchscriptfile,\n )\n fp.writelines(full_line)\n cmd = CommandLine(\n \"bash\",\n environ=dict(os.environ),\n resource_monitor=False,\n terminal_output=\"allatonce\",\n )\n cmd.inputs.args = \"%s\" % submitjobsfile\n cmd.run()\n logger.info(\"submitted all jobs to queue\")\n", "path": "nipype/pipeline/plugins/sgegraph.py"}]} | 2,610 | 241 |
gh_patches_debug_36004 | rasdani/github-patches | git_diff | great-expectations__great_expectations-5336 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
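A minimal Python 2 illustration of the two forms (added here for clarity, not part of the original issue text):
```
from __future__ import division  # preferred: / no longer truncates integers

x, y = 7, 2
print(x / y)       # 3.5 with the __future__ import (3 without it on Python 2)
print(1. * x / y)  # 3.5 -- the workaround this issue asks to replace
print(x // y)      # 3   -- floor division remains available when truncation is wanted
```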
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py`
Content:
```
1 import copy
2 import logging
3 from typing import Any, Dict
4
5 import numpy as np
6 import pandas as pd
7
8 from great_expectations.core.util import (
9 convert_to_json_serializable,
10 get_sql_dialect_floating_point_infinity_value,
11 )
12 from great_expectations.execution_engine import (
13 PandasExecutionEngine,
14 SparkDFExecutionEngine,
15 SqlAlchemyExecutionEngine,
16 )
17 from great_expectations.execution_engine.execution_engine import MetricDomainTypes
18 from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
19 ColumnAggregateMetricProvider,
20 )
21 from great_expectations.expectations.metrics.import_manager import Bucketizer, F, sa
22 from great_expectations.expectations.metrics.metric_provider import metric_value
23
24 logger = logging.getLogger(__name__)
25
26
27 class ColumnHistogram(ColumnAggregateMetricProvider):
28 metric_name = "column.histogram"
29 value_keys = ("bins",)
30
31 @metric_value(engine=PandasExecutionEngine)
32 def _pandas(
33 cls,
34 execution_engine: PandasExecutionEngine,
35 metric_domain_kwargs: Dict,
36 metric_value_kwargs: Dict,
37 metrics: Dict[str, Any],
38 runtime_configuration: Dict,
39 ):
40 df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
41 domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
42 )
43 column = accessor_domain_kwargs["column"]
44 bins = metric_value_kwargs["bins"]
45 column_series: pd.Series = df[column]
46 column_null_elements_cond: pd.Series = column_series.isnull()
47 column_nonnull_elements: pd.Series = column_series[~column_null_elements_cond]
48 hist, bin_edges = np.histogram(column_nonnull_elements, bins, density=False)
49 return list(hist)
50
51 @metric_value(engine=SqlAlchemyExecutionEngine)
52 def _sqlalchemy(
53 cls,
54 execution_engine: SqlAlchemyExecutionEngine,
55 metric_domain_kwargs: Dict,
56 metric_value_kwargs: Dict,
57 metrics: Dict[str, Any],
58 runtime_configuration: Dict,
59 ):
60 """return a list of counts corresponding to bins
61
62 Args:
63 column: the name of the column for which to get the histogram
64 bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching
65 """
66 selectable, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
67 domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
68 )
69 column = accessor_domain_kwargs["column"]
70 bins = metric_value_kwargs["bins"]
71
72 case_conditions = []
73 idx = 0
74 if isinstance(bins, np.ndarray):
75 bins = bins.tolist()
76 else:
77 bins = list(bins)
78
79 # If we have an infinite lower bound, don't express that in sql
80 if (
81 bins[0]
82 == get_sql_dialect_floating_point_infinity_value(
83 schema="api_np", negative=True
84 )
85 ) or (
86 bins[0]
87 == get_sql_dialect_floating_point_infinity_value(
88 schema="api_cast", negative=True
89 )
90 ):
91 case_conditions.append(
92 sa.func.sum(
93 sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0)
94 ).label(f"bin_{str(idx)}")
95 )
96 idx += 1
97
98 for idx in range(idx, len(bins) - 2):
99 case_conditions.append(
100 sa.func.sum(
101 sa.case(
102 [
103 (
104 sa.and_(
105 bins[idx] <= sa.column(column),
106 sa.column(column) < bins[idx + 1],
107 ),
108 1,
109 )
110 ],
111 else_=0,
112 )
113 ).label(f"bin_{str(idx)}")
114 )
115
116 if (
117 bins[-1]
118 == get_sql_dialect_floating_point_infinity_value(
119 schema="api_np", negative=False
120 )
121 ) or (
122 bins[-1]
123 == get_sql_dialect_floating_point_infinity_value(
124 schema="api_cast", negative=False
125 )
126 ):
127 case_conditions.append(
128 sa.func.sum(
129 sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)
130 ).label(f"bin_{str(len(bins) - 1)}")
131 )
132 else:
133 case_conditions.append(
134 sa.func.sum(
135 sa.case(
136 [
137 (
138 sa.and_(
139 bins[-2] <= sa.column(column),
140 sa.column(column) <= bins[-1],
141 ),
142 1,
143 )
144 ],
145 else_=0,
146 )
147 ).label(f"bin_{str(len(bins) - 1)}")
148 )
149
150 query = (
151 sa.select(case_conditions)
152 .where(
153 sa.column(column) != None,
154 )
155 .select_from(selectable)
156 )
157
158 # Run the data through convert_to_json_serializable to ensure we do not have Decimal types
159 hist = convert_to_json_serializable(
160 list(execution_engine.engine.execute(query).fetchone())
161 )
162 return hist
163
164 @metric_value(engine=SparkDFExecutionEngine)
165 def _spark(
166 cls,
167 execution_engine: SparkDFExecutionEngine,
168 metric_domain_kwargs: Dict,
169 metric_value_kwargs: Dict,
170 metrics: Dict[str, Any],
171 runtime_configuration: Dict,
172 ):
173 df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
174 domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
175 )
176 bins = metric_value_kwargs["bins"]
177 column = metric_domain_kwargs["column"]
178
179 """return a list of counts corresponding to bins"""
180 bins = list(
181 copy.deepcopy(bins)
182 ) # take a copy since we are inserting and popping
183 if bins[0] == -np.inf or bins[0] == -float("inf"):
184 added_min = False
185 bins[0] = -float("inf")
186 else:
187 added_min = True
188 bins.insert(0, -float("inf"))
189
190 if bins[-1] == np.inf or bins[-1] == float("inf"):
191 added_max = False
192 bins[-1] = float("inf")
193 else:
194 added_max = True
195 bins.append(float("inf"))
196
197 temp_column = df.select(column).where(F.col(column).isNotNull())
198 bucketizer = Bucketizer(splits=bins, inputCol=column, outputCol="buckets")
199 bucketed = bucketizer.setHandleInvalid("skip").transform(temp_column)
200
201 # This is painful to do, but: bucketizer cannot handle values outside of a range
202 # (hence adding -/+ infinity above)
203
204 # Further, it *always* follows the numpy convention of lower_bound <= bin < upper_bound
205 # for all but the last bin
206
207 # But, since the last bin in our case will often be +infinity, we need to
208 # find the number of values exactly equal to the upper bound to add those
209
210 # We'll try for an optimization by asking for it at the same time
211 if added_max:
212 upper_bound_count = (
213 temp_column.select(column).filter(F.col(column) == bins[-2]).count()
214 )
215 else:
216 upper_bound_count = 0
217
218 hist_rows = bucketed.groupBy("buckets").count().collect()
219 # Spark only returns buckets that have nonzero counts.
220 hist = [0] * (len(bins) - 1)
221 for row in hist_rows:
222 hist[int(row["buckets"])] = row["count"]
223
224 hist[-2] += upper_bound_count
225
226 if added_min:
227 below_bins = hist.pop(0)
228 bins.pop(0)
229 if below_bins > 0:
230 logger.warning("Discarding histogram values below lowest bin.")
231
232 if added_max:
233 above_bins = hist.pop(-1)
234 bins.pop(-1)
235 if above_bins > 0:
236 logger.warning("Discarding histogram values above highest bin.")
237
238 return hist
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py
--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py
+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py
@@ -95,15 +95,19 @@
)
idx += 1
+ negative_boundary: float
+ positive_boundary: float
for idx in range(idx, len(bins) - 2):
+ negative_boundary = float(bins[idx])
+ positive_boundary = float(bins[idx + 1])
case_conditions.append(
sa.func.sum(
sa.case(
[
(
sa.and_(
- bins[idx] <= sa.column(column),
- sa.column(column) < bins[idx + 1],
+ negative_boundary <= sa.column(column),
+ sa.column(column) < positive_boundary,
),
1,
)
@@ -124,20 +128,23 @@
schema="api_cast", negative=False
)
):
+ negative_boundary = float(bins[-2])
case_conditions.append(
sa.func.sum(
- sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)
+ sa.case([(negative_boundary <= sa.column(column), 1)], else_=0)
).label(f"bin_{str(len(bins) - 1)}")
)
else:
+ negative_boundary = float(bins[-2])
+ positive_boundary = float(bins[-1])
case_conditions.append(
sa.func.sum(
sa.case(
[
(
sa.and_(
- bins[-2] <= sa.column(column),
- sa.column(column) <= bins[-1],
+ negative_boundary <= sa.column(column),
+ sa.column(column) <= positive_boundary,
),
1,
)
| {"golden_diff": "diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py\n@@ -95,15 +95,19 @@\n )\n idx += 1\n \n+ negative_boundary: float\n+ positive_boundary: float\n for idx in range(idx, len(bins) - 2):\n+ negative_boundary = float(bins[idx])\n+ positive_boundary = float(bins[idx + 1])\n case_conditions.append(\n sa.func.sum(\n sa.case(\n [\n (\n sa.and_(\n- bins[idx] <= sa.column(column),\n- sa.column(column) < bins[idx + 1],\n+ negative_boundary <= sa.column(column),\n+ sa.column(column) < positive_boundary,\n ),\n 1,\n )\n@@ -124,20 +128,23 @@\n schema=\"api_cast\", negative=False\n )\n ):\n+ negative_boundary = float(bins[-2])\n case_conditions.append(\n sa.func.sum(\n- sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)\n+ sa.case([(negative_boundary <= sa.column(column), 1)], else_=0)\n ).label(f\"bin_{str(len(bins) - 1)}\")\n )\n else:\n+ negative_boundary = float(bins[-2])\n+ positive_boundary = float(bins[-1])\n case_conditions.append(\n sa.func.sum(\n sa.case(\n [\n (\n sa.and_(\n- bins[-2] <= sa.column(column),\n- sa.column(column) <= bins[-1],\n+ negative_boundary <= sa.column(column),\n+ sa.column(column) <= positive_boundary,\n ),\n 1,\n )\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import copy\nimport logging\nfrom typing import Any, Dict\n\nimport numpy as np\nimport pandas as pd\n\nfrom great_expectations.core.util import (\n convert_to_json_serializable,\n get_sql_dialect_floating_point_infinity_value,\n)\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.execution_engine.execution_engine import MetricDomainTypes\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n)\nfrom great_expectations.expectations.metrics.import_manager import Bucketizer, F, sa\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\n\nlogger = logging.getLogger(__name__)\n\n\nclass ColumnHistogram(ColumnAggregateMetricProvider):\n metric_name = \"column.histogram\"\n value_keys = (\"bins\",)\n\n @metric_value(engine=PandasExecutionEngine)\n def _pandas(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN\n )\n column = accessor_domain_kwargs[\"column\"]\n bins = metric_value_kwargs[\"bins\"]\n column_series: pd.Series = df[column]\n column_null_elements_cond: pd.Series = column_series.isnull()\n column_nonnull_elements: pd.Series = column_series[~column_null_elements_cond]\n hist, bin_edges = np.histogram(column_nonnull_elements, bins, density=False)\n return list(hist)\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: 
Dict,\n ):\n \"\"\"return a list of counts corresponding to bins\n\n Args:\n column: the name of the column for which to get the histogram\n bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching\n \"\"\"\n selectable, _, accessor_domain_kwargs = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN\n )\n column = accessor_domain_kwargs[\"column\"]\n bins = metric_value_kwargs[\"bins\"]\n\n case_conditions = []\n idx = 0\n if isinstance(bins, np.ndarray):\n bins = bins.tolist()\n else:\n bins = list(bins)\n\n # If we have an infinite lower bound, don't express that in sql\n if (\n bins[0]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_np\", negative=True\n )\n ) or (\n bins[0]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_cast\", negative=True\n )\n ):\n case_conditions.append(\n sa.func.sum(\n sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0)\n ).label(f\"bin_{str(idx)}\")\n )\n idx += 1\n\n for idx in range(idx, len(bins) - 2):\n case_conditions.append(\n sa.func.sum(\n sa.case(\n [\n (\n sa.and_(\n bins[idx] <= sa.column(column),\n sa.column(column) < bins[idx + 1],\n ),\n 1,\n )\n ],\n else_=0,\n )\n ).label(f\"bin_{str(idx)}\")\n )\n\n if (\n bins[-1]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_np\", negative=False\n )\n ) or (\n bins[-1]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_cast\", negative=False\n )\n ):\n case_conditions.append(\n sa.func.sum(\n sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)\n ).label(f\"bin_{str(len(bins) - 1)}\")\n )\n else:\n case_conditions.append(\n sa.func.sum(\n sa.case(\n [\n (\n sa.and_(\n bins[-2] <= sa.column(column),\n sa.column(column) <= bins[-1],\n ),\n 1,\n )\n ],\n else_=0,\n )\n ).label(f\"bin_{str(len(bins) - 1)}\")\n )\n\n query = (\n sa.select(case_conditions)\n .where(\n sa.column(column) != None,\n )\n .select_from(selectable)\n )\n\n # Run the data through convert_to_json_serializable to ensure we do not have Decimal types\n hist = convert_to_json_serializable(\n list(execution_engine.engine.execute(query).fetchone())\n )\n return hist\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN\n )\n bins = metric_value_kwargs[\"bins\"]\n column = metric_domain_kwargs[\"column\"]\n\n \"\"\"return a list of counts corresponding to bins\"\"\"\n bins = list(\n copy.deepcopy(bins)\n ) # take a copy since we are inserting and popping\n if bins[0] == -np.inf or bins[0] == -float(\"inf\"):\n added_min = False\n bins[0] = -float(\"inf\")\n else:\n added_min = True\n bins.insert(0, -float(\"inf\"))\n\n if bins[-1] == np.inf or bins[-1] == float(\"inf\"):\n added_max = False\n bins[-1] = float(\"inf\")\n else:\n added_max = True\n bins.append(float(\"inf\"))\n\n temp_column = df.select(column).where(F.col(column).isNotNull())\n bucketizer = Bucketizer(splits=bins, inputCol=column, outputCol=\"buckets\")\n bucketed = bucketizer.setHandleInvalid(\"skip\").transform(temp_column)\n\n # This is painful to do, but: bucketizer cannot handle values outside of a range\n # (hence adding -/+ infinity above)\n\n # Further, it *always* follows 
the numpy convention of lower_bound <= bin < upper_bound\n # for all but the last bin\n\n # But, since the last bin in our case will often be +infinity, we need to\n # find the number of values exactly equal to the upper bound to add those\n\n # We'll try for an optimization by asking for it at the same time\n if added_max:\n upper_bound_count = (\n temp_column.select(column).filter(F.col(column) == bins[-2]).count()\n )\n else:\n upper_bound_count = 0\n\n hist_rows = bucketed.groupBy(\"buckets\").count().collect()\n # Spark only returns buckets that have nonzero counts.\n hist = [0] * (len(bins) - 1)\n for row in hist_rows:\n hist[int(row[\"buckets\"])] = row[\"count\"]\n\n hist[-2] += upper_bound_count\n\n if added_min:\n below_bins = hist.pop(0)\n bins.pop(0)\n if below_bins > 0:\n logger.warning(\"Discarding histogram values below lowest bin.\")\n\n if added_max:\n above_bins = hist.pop(-1)\n bins.pop(-1)\n if above_bins > 0:\n logger.warning(\"Discarding histogram values above highest bin.\")\n\n return hist\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py"}], "after_files": [{"content": "import copy\nimport logging\nfrom typing import Any, Dict\n\nimport numpy as np\nimport pandas as pd\n\nfrom great_expectations.core.util import (\n convert_to_json_serializable,\n get_sql_dialect_floating_point_infinity_value,\n)\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.execution_engine.execution_engine import MetricDomainTypes\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n)\nfrom great_expectations.expectations.metrics.import_manager import Bucketizer, F, sa\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\n\nlogger = logging.getLogger(__name__)\n\n\nclass ColumnHistogram(ColumnAggregateMetricProvider):\n metric_name = \"column.histogram\"\n value_keys = (\"bins\",)\n\n @metric_value(engine=PandasExecutionEngine)\n def _pandas(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN\n )\n column = accessor_domain_kwargs[\"column\"]\n bins = metric_value_kwargs[\"bins\"]\n column_series: pd.Series = df[column]\n column_null_elements_cond: pd.Series = column_series.isnull()\n column_nonnull_elements: pd.Series = column_series[~column_null_elements_cond]\n hist, bin_edges = np.histogram(column_nonnull_elements, bins, density=False)\n return list(hist)\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n \"\"\"return a list of counts corresponding to bins\n\n Args:\n column: the name of the column for which to get the histogram\n bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching\n \"\"\"\n selectable, _, accessor_domain_kwargs = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN\n )\n column = accessor_domain_kwargs[\"column\"]\n bins = metric_value_kwargs[\"bins\"]\n\n 
case_conditions = []\n idx = 0\n if isinstance(bins, np.ndarray):\n bins = bins.tolist()\n else:\n bins = list(bins)\n\n # If we have an infinite lower bound, don't express that in sql\n if (\n bins[0]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_np\", negative=True\n )\n ) or (\n bins[0]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_cast\", negative=True\n )\n ):\n case_conditions.append(\n sa.func.sum(\n sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0)\n ).label(f\"bin_{str(idx)}\")\n )\n idx += 1\n\n negative_boundary: float\n positive_boundary: float\n for idx in range(idx, len(bins) - 2):\n negative_boundary = float(bins[idx])\n positive_boundary = float(bins[idx + 1])\n case_conditions.append(\n sa.func.sum(\n sa.case(\n [\n (\n sa.and_(\n negative_boundary <= sa.column(column),\n sa.column(column) < positive_boundary,\n ),\n 1,\n )\n ],\n else_=0,\n )\n ).label(f\"bin_{str(idx)}\")\n )\n\n if (\n bins[-1]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_np\", negative=False\n )\n ) or (\n bins[-1]\n == get_sql_dialect_floating_point_infinity_value(\n schema=\"api_cast\", negative=False\n )\n ):\n negative_boundary = float(bins[-2])\n case_conditions.append(\n sa.func.sum(\n sa.case([(negative_boundary <= sa.column(column), 1)], else_=0)\n ).label(f\"bin_{str(len(bins) - 1)}\")\n )\n else:\n negative_boundary = float(bins[-2])\n positive_boundary = float(bins[-1])\n case_conditions.append(\n sa.func.sum(\n sa.case(\n [\n (\n sa.and_(\n negative_boundary <= sa.column(column),\n sa.column(column) <= positive_boundary,\n ),\n 1,\n )\n ],\n else_=0,\n )\n ).label(f\"bin_{str(len(bins) - 1)}\")\n )\n\n query = (\n sa.select(case_conditions)\n .where(\n sa.column(column) != None,\n )\n .select_from(selectable)\n )\n\n # Run the data through convert_to_json_serializable to ensure we do not have Decimal types\n hist = convert_to_json_serializable(\n list(execution_engine.engine.execute(query).fetchone())\n )\n return hist\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN\n )\n bins = metric_value_kwargs[\"bins\"]\n column = metric_domain_kwargs[\"column\"]\n\n \"\"\"return a list of counts corresponding to bins\"\"\"\n bins = list(\n copy.deepcopy(bins)\n ) # take a copy since we are inserting and popping\n if bins[0] == -np.inf or bins[0] == -float(\"inf\"):\n added_min = False\n bins[0] = -float(\"inf\")\n else:\n added_min = True\n bins.insert(0, -float(\"inf\"))\n\n if bins[-1] == np.inf or bins[-1] == float(\"inf\"):\n added_max = False\n bins[-1] = float(\"inf\")\n else:\n added_max = True\n bins.append(float(\"inf\"))\n\n temp_column = df.select(column).where(F.col(column).isNotNull())\n bucketizer = Bucketizer(splits=bins, inputCol=column, outputCol=\"buckets\")\n bucketed = bucketizer.setHandleInvalid(\"skip\").transform(temp_column)\n\n # This is painful to do, but: bucketizer cannot handle values outside of a range\n # (hence adding -/+ infinity above)\n\n # Further, it *always* follows the numpy convention of lower_bound <= bin < upper_bound\n # for all but the last bin\n\n # But, since the last bin in our case will often be +infinity, we need to\n # find the number of values exactly 
equal to the upper bound to add those\n\n # We'll try for an optimization by asking for it at the same time\n if added_max:\n upper_bound_count = (\n temp_column.select(column).filter(F.col(column) == bins[-2]).count()\n )\n else:\n upper_bound_count = 0\n\n hist_rows = bucketed.groupBy(\"buckets\").count().collect()\n # Spark only returns buckets that have nonzero counts.\n hist = [0] * (len(bins) - 1)\n for row in hist_rows:\n hist[int(row[\"buckets\"])] = row[\"count\"]\n\n hist[-2] += upper_bound_count\n\n if added_min:\n below_bins = hist.pop(0)\n bins.pop(0)\n if below_bins > 0:\n logger.warning(\"Discarding histogram values below lowest bin.\")\n\n if added_max:\n above_bins = hist.pop(-1)\n bins.pop(-1)\n if above_bins > 0:\n logger.warning(\"Discarding histogram values above highest bin.\")\n\n return hist\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py"}]} | 2,603 | 429 |
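
Editor's note on the row above: the golden diff wraps each bin edge in `float()` before it is placed inside the SQLAlchemy `and_`/`case` expression, presumably so that NumPy scalar edges are not embedded directly in the generated clause. The sketch below is an illustration only; the bin values and the column name are assumptions, not taken from the dataset row, and it simply shows the cast pattern the patch uses.

```python
# Illustration only: assumed bin edges and column name, not from the row above.
import numpy as np
import sqlalchemy as sa

bins = np.linspace(0.0, 10.0, num=5)        # edges come back as np.float64 scalars
negative_boundary = float(bins[1])          # cast to native float, as the diff does
positive_boundary = float(bins[2])

condition = sa.and_(
    negative_boundary <= sa.column("x"),
    sa.column("x") < positive_boundary,
)
print(type(bins[1]).__name__)   # float64
print(condition)                # x >= :x_1 AND x < :x_2  (parameterized clause)
```
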
gh_patches_debug_5459 | rasdani/github-patches | git_diff | conan-io__conan-3033 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] SystemPackageTool doesn't correctly detect when apt packages are installed
**Summary**
`SystemPackageTool` uses `dpkg -s <package_name>` to detect whether a package is installed via apt. This is not a sufficient check, and fails to detect that a package has been uninstalled.
Line where the issue is:
https://github.com/conan-io/conan/blob/d508c44031542c95051d6036047ce156b935e8bc/conans/client/tools/system_pm.py#L134
**Reproduction**
On a fresh Ubuntu 16.04 box without the `libudev-dev` package installed, `dpkg -s` gives this behaviour:
```bash
vagrant@ubuntu-xenial:~$ dpkg -s libudev-dev
dpkg-query: package 'libudev-dev' is not installed and no information is available
Use dpkg --info (= dpkg-deb --info) to examine archive files,
and dpkg --contents (= dpkg-deb --contents) to list their contents.
vagrant@ubuntu-xenial:~$ echo $?
1
```
The return value is `1`, so `SystemPackageTool` correctly detects that the package is not installed.
If the package is installed with `sudo apt-get install libudev-dev` then `dpkg -s` gives this behaviour:
```bash
vagrant@ubuntu-xenial:~$ dpkg -s libudev-dev
Package: libudev-dev
Status: install ok installed
Priority: optional
Section: libdevel
Installed-Size: 527
Maintainer: Ubuntu Developers <[email protected]>
Architecture: amd64
Multi-Arch: same
Source: systemd
Version: 229-4ubuntu21.2
Depends: libacl1 (>= 2.2.51-8), libblkid1 (>= 2.19.1), libc6 (>= 2.17), libkmod2 (>= 5~), libselinux1 (>= 2.0.65), libudev1 (= 229-4ubuntu21.2)
Pre-Depends: dpkg (>= 1.17.14)
Description: libudev development files
This package contains the files needed for developing applications that
use libudev.
Homepage: http://www.freedesktop.org/wiki/Software/systemd
Original-Maintainer: Debian systemd Maintainers <[email protected]>
vagrant@ubuntu-xenial:~$ echo $?
0
```
Note the return code is `0`, so `SystemPackageTool` works correctly here, too.
However, if the package is uninstalled with `sudo apt-get remove libudev-dev` then `dpkg -s` still returns an exit code of 0:
```bash
vagrant@ubuntu-xenial:~$ dpkg -s libudev-dev
Package: libudev-dev
Status: deinstall ok config-files
Priority: optional
Section: libdevel
Installed-Size: 527
Maintainer: Ubuntu Developers <[email protected]>
Architecture: amd64
Multi-Arch: same
Source: systemd
Version: 229-4ubuntu21.2
Config-Version: 229-4ubuntu21.2
Depends: libacl1 (>= 2.2.51-8), libblkid1 (>= 2.19.1), libc6 (>= 2.17), libkmod2 (>= 5~), libselinux1 (>= 2.0.65), libudev1 (= 229-4ubuntu21.2)
Pre-Depends: dpkg (>= 1.17.14)
Description: libudev development files
This package contains the files needed for developing applications that
use libudev.
Homepage: http://www.freedesktop.org/wiki/Software/systemd
Original-Maintainer: Debian systemd Maintainers <[email protected]>
vagrant@ubuntu-xenial:~$ echo $?
0
```
Note the different values for the `Status` line.
**Suggested Fix**
There are discussions of the best ways to script a check for package installation here:
https://stackoverflow.com/questions/1298066/check-if-a-package-is-installed-and-then-install-it-if-its-not
https://askubuntu.com/questions/319307/reliably-check-if-a-package-is-installed-or-not?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
Those discussions indicate that the preferred way of checking is to use:
```bash
dpkg-query -W -f='${Status}' <package_name> | grep -q "ok installed"
```
This will return `0` only if the package is installed.
I will raise a PR containing the suggested fix.
**Environment**
Conan 1.4.1
Ubuntu 16.04
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/tools/system_pm.py`
Content:
```
1 import os
2 from conans.client.runner import ConanRunner
3 from conans.client.tools.oss import OSInfo
4 from conans.errors import ConanException
5 from conans.util.env_reader import get_env
6
7 _global_output = None
8
9
10 class SystemPackageTool(object):
11
12 def __init__(self, runner=None, os_info=None, tool=None, recommends=False):
13 os_info = os_info or OSInfo()
14 self._is_up_to_date = False
15 self._tool = tool or self._create_tool(os_info)
16 self._tool._sudo_str = "sudo " if self._is_sudo_enabled() else ""
17 self._tool._runner = runner or ConanRunner()
18 self._tool._recommends = recommends
19
20 @staticmethod
21 def _is_sudo_enabled():
22 if "CONAN_SYSREQUIRES_SUDO" not in os.environ:
23 if os.name == 'posix' and os.geteuid() == 0:
24 return False
25 if os.name == 'nt':
26 return False
27 return get_env("CONAN_SYSREQUIRES_SUDO", True)
28
29 @staticmethod
30 def _get_sysrequire_mode():
31 allowed_modes = ("enabled", "verify", "disabled")
32 mode = get_env("CONAN_SYSREQUIRES_MODE", "enabled")
33 mode_lower = mode.lower()
34 if mode_lower not in allowed_modes:
35 raise ConanException("CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r" % (mode, allowed_modes))
36 return mode_lower
37
38 @staticmethod
39 def _create_tool(os_info):
40 if os_info.with_apt:
41 return AptTool()
42 elif os_info.with_yum:
43 return YumTool()
44 elif os_info.with_pacman:
45 return PacManTool()
46 elif os_info.is_macos:
47 return BrewTool()
48 elif os_info.is_freebsd:
49 return PkgTool()
50 elif os_info.is_solaris:
51 return PkgUtilTool()
52 elif os_info.with_zypper:
53 return ZypperTool()
54 else:
55 return NullTool()
56
57 def update(self):
58 """
59 Get the system package tool update command
60 """
61 mode = self._get_sysrequire_mode()
62 if mode in ("disabled", "verify"):
63 _global_output.info("Not updating system_requirements. CONAN_SYSREQUIRES_MODE=%s" % mode)
64 return
65 self._is_up_to_date = True
66 self._tool.update()
67
68 def install(self, packages, update=True, force=False):
69 """
70 Get the system package tool install command.
71 '"""
72 packages = [packages] if isinstance(packages, str) else list(packages)
73
74 mode = self._get_sysrequire_mode()
75
76 if mode in ("verify", "disabled"):
77 # Report to output packages need to be installed
78 if mode == "disabled":
79 _global_output.info("The following packages need to be installed:\n %s" % "\n".join(packages))
80 return
81
82 if mode == "verify" and not self._installed(packages):
83 _global_output.error("The following packages need to be installed:\n %s" % "\n".join(packages))
84 raise ConanException(
85 "Aborted due to CONAN_SYSREQUIRES_MODE=%s. Some system packages need to be installed" % mode
86 )
87
88 if not force and self._installed(packages):
89 return
90
91 # From here system packages can be updated/modified
92 if update and not self._is_up_to_date:
93 self.update()
94 self._install_any(packages)
95
96 def _installed(self, packages):
97 if not packages:
98 return True
99
100 for pkg in packages:
101 if self._tool.installed(pkg):
102 _global_output.info("Package already installed: %s" % pkg)
103 return True
104 return False
105
106 def _install_any(self, packages):
107 if len(packages) == 1:
108 return self._tool.install(packages[0])
109 for pkg in packages:
110 try:
111 return self._tool.install(pkg)
112 except ConanException:
113 pass
114 raise ConanException("Could not install any of %s" % packages)
115
116
117 class NullTool(object):
118 def update(self):
119 pass
120
121 def install(self, package_name):
122 _global_output.warn("Only available for linux with apt-get, yum, or pacman or OSX with brew or "
123 "FreeBSD with pkg or Solaris with pkgutil")
124
125 def installed(self, package_name):
126 return False
127
128
129 class AptTool(object):
130 def update(self):
131 _run(self._runner, "%sapt-get update" % self._sudo_str)
132
133 def install(self, package_name):
134 recommends_str = '' if self._recommends else '--no-install-recommends '
135 _run(self._runner, "%sapt-get install -y %s%s" % (self._sudo_str, recommends_str, package_name))
136
137 def installed(self, package_name):
138 exit_code = self._runner("dpkg -s %s" % package_name, None)
139 return exit_code == 0
140
141
142 class YumTool(object):
143 def update(self):
144 _run(self._runner, "%syum update" % self._sudo_str, accepted_returns=[0, 100])
145
146 def install(self, package_name):
147 _run(self._runner, "%syum install -y %s" % (self._sudo_str, package_name))
148
149 def installed(self, package_name):
150 exit_code = self._runner("rpm -q %s" % package_name, None)
151 return exit_code == 0
152
153
154 class BrewTool(object):
155 def update(self):
156 _run(self._runner, "brew update")
157
158 def install(self, package_name):
159 _run(self._runner, "brew install %s" % package_name)
160
161 def installed(self, package_name):
162 exit_code = self._runner('test -n "$(brew ls --versions %s)"' % package_name, None)
163 return exit_code == 0
164
165
166 class PkgTool(object):
167 def update(self):
168 _run(self._runner, "%spkg update" % self._sudo_str)
169
170 def install(self, package_name):
171 _run(self._runner, "%spkg install -y %s" % (self._sudo_str, package_name))
172
173 def installed(self, package_name):
174 exit_code = self._runner("pkg info %s" % package_name, None)
175 return exit_code == 0
176
177
178 class PkgUtilTool(object):
179 def update(self):
180 _run(self._runner, "%spkgutil --catalog" % self._sudo_str)
181
182 def install(self, package_name):
183 _run(self._runner, "%spkgutil --install --yes %s" % (self._sudo_str, package_name))
184
185 def installed(self, package_name):
186 exit_code = self._runner('test -n "`pkgutil --list %s`"' % package_name, None)
187 return exit_code == 0
188
189
190 class ChocolateyTool(object):
191 def update(self):
192 _run(self._runner, "choco outdated")
193
194 def install(self, package_name):
195 _run(self._runner, "choco install --yes %s" % package_name)
196
197 def installed(self, package_name):
198 exit_code = self._runner('choco search --local-only --exact %s | '
199 'findstr /c:"1 packages installed."' % package_name, None)
200 return exit_code == 0
201
202
203 class PacManTool(object):
204 def update(self):
205 _run(self._runner, "%spacman -Syyu --noconfirm" % self._sudo_str)
206
207 def install(self, package_name):
208 _run(self._runner, "%spacman -S --noconfirm %s" % (self._sudo_str, package_name))
209
210 def installed(self, package_name):
211 exit_code = self._runner("pacman -Qi %s" % package_name, None)
212 return exit_code == 0
213
214
215 class ZypperTool(object):
216 def update(self):
217 _run(self._runner, "%szypper --non-interactive ref" % self._sudo_str)
218
219 def install(self, package_name):
220 _run(self._runner, "%szypper --non-interactive in %s" % (self._sudo_str, package_name))
221
222 def installed(self, package_name):
223 exit_code = self._runner("rpm -q %s" % package_name, None)
224 return exit_code == 0
225
226
227 def _run(runner, command, accepted_returns=None):
228 accepted_returns = accepted_returns or [0, ]
229 _global_output.info("Running: %s" % command)
230 if runner(command, True) not in accepted_returns:
231 raise ConanException("Command '%s' failed" % command)
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/tools/system_pm.py b/conans/client/tools/system_pm.py
--- a/conans/client/tools/system_pm.py
+++ b/conans/client/tools/system_pm.py
@@ -135,7 +135,7 @@
_run(self._runner, "%sapt-get install -y %s%s" % (self._sudo_str, recommends_str, package_name))
def installed(self, package_name):
- exit_code = self._runner("dpkg -s %s" % package_name, None)
+ exit_code = self._runner("dpkg-query -W -f='${Status}' %s | grep -q \"ok installed\"" % package_name, None)
return exit_code == 0
| {"golden_diff": "diff --git a/conans/client/tools/system_pm.py b/conans/client/tools/system_pm.py\n--- a/conans/client/tools/system_pm.py\n+++ b/conans/client/tools/system_pm.py\n@@ -135,7 +135,7 @@\n _run(self._runner, \"%sapt-get install -y %s%s\" % (self._sudo_str, recommends_str, package_name))\n \n def installed(self, package_name):\n- exit_code = self._runner(\"dpkg -s %s\" % package_name, None)\n+ exit_code = self._runner(\"dpkg-query -W -f='${Status}' %s | grep -q \\\"ok installed\\\"\" % package_name, None)\n return exit_code == 0\n", "issue": "[bug] SystemPackageTool doesn't correctly detect when apt packages are installed\n**Summary**\r\n`SystemPackageTool` uses `dpkg -s <package_name>` to detect whether a package is installed via apt. This is not a sufficient check, and fails to detect that a package has been uninstalled.\r\n\r\nLine where the issue is:\r\nhttps://github.com/conan-io/conan/blob/d508c44031542c95051d6036047ce156b935e8bc/conans/client/tools/system_pm.py#L134\r\n\r\n**Reproduction**\r\nOn a fresh Ubuntu 16.04 box without the `libudev-dev` package installed, `dpkg -s` gives this behaviour:\r\n```bash\r\nvagrant@ubuntu-xenial:~$ dpkg -s libudev-dev\r\ndpkg-query: package 'libudev-dev' is not installed and no information is available\r\nUse dpkg --info (= dpkg-deb --info) to examine archive files,\r\nand dpkg --contents (= dpkg-deb --contents) to list their contents.\r\n\r\nvagrant@ubuntu-xenial:~$ echo $?\r\n1\r\n```\r\nThe return value is `1`, so `SystemPackageTool` correctly detects that the package is not installed.\r\n\r\nIf the package is installed with `sudo apt-get install libudev-dev` then `dpkg -s` gives this behaviour:\r\n```bash\r\nvagrant@ubuntu-xenial:~$ dpkg -s libudev-dev\r\nPackage: libudev-dev\r\nStatus: install ok installed\r\nPriority: optional\r\nSection: libdevel\r\nInstalled-Size: 527\r\nMaintainer: Ubuntu Developers <[email protected]>\r\nArchitecture: amd64\r\nMulti-Arch: same\r\nSource: systemd\r\nVersion: 229-4ubuntu21.2\r\nDepends: libacl1 (>= 2.2.51-8), libblkid1 (>= 2.19.1), libc6 (>= 2.17), libkmod2 (>= 5~), libselinux1 (>= 2.0.65), libudev1 (= 229-4ubuntu21.2)\r\nPre-Depends: dpkg (>= 1.17.14)\r\nDescription: libudev development files\r\n This package contains the files needed for developing applications that\r\n use libudev.\r\nHomepage: http://www.freedesktop.org/wiki/Software/systemd\r\nOriginal-Maintainer: Debian systemd Maintainers <[email protected]>\r\n\r\nvagrant@ubuntu-xenial:~$ echo $?\r\n0\r\n```\r\nNote the return code is `0`, so `SystemPackageTool` works correctly here, too.\r\n\r\nHowever, if the package is uninstalled with `sudo apt-get remove libudev-dev` then `dpkg -s` still returns an exit code of 0:\r\n```bash\r\nvagrant@ubuntu-xenial:~$ dpkg -s libudev-dev\r\nPackage: libudev-dev\r\nStatus: deinstall ok config-files\r\nPriority: optional\r\nSection: libdevel\r\nInstalled-Size: 527\r\nMaintainer: Ubuntu Developers <[email protected]>\r\nArchitecture: amd64\r\nMulti-Arch: same\r\nSource: systemd\r\nVersion: 229-4ubuntu21.2\r\nConfig-Version: 229-4ubuntu21.2\r\nDepends: libacl1 (>= 2.2.51-8), libblkid1 (>= 2.19.1), libc6 (>= 2.17), libkmod2 (>= 5~), libselinux1 (>= 2.0.65), libudev1 (= 229-4ubuntu21.2)\r\nPre-Depends: dpkg (>= 1.17.14)\r\nDescription: libudev development files\r\n This package contains the files needed for developing applications that\r\n use libudev.\r\nHomepage: http://www.freedesktop.org/wiki/Software/systemd\r\nOriginal-Maintainer: Debian systemd Maintainers <[email 
protected]>\r\n\r\nvagrant@ubuntu-xenial:~$ echo $?\r\n0\r\n```\r\n\r\nNote the different values for the `Status` line.\r\n\r\n**Suggested Fix**\r\nThere are discussions of the best ways to script a check for package installation here:\r\nhttps://stackoverflow.com/questions/1298066/check-if-a-package-is-installed-and-then-install-it-if-its-not\r\nhttps://askubuntu.com/questions/319307/reliably-check-if-a-package-is-installed-or-not?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa\r\n\r\nThose discussions indicate that the preferred way of checking is to use:\r\n```bash\r\ndpkg-query -W -f='${Status}' <package_name> | grep -q \"ok installed\"\r\n```\r\nThis will return `0` only if the package is installed.\r\n\r\nI will raise a PR containing the suggested fix.\r\n\r\n**Environment**\r\nConan 1.4.1\r\nUbuntu 16.04\r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "import os\nfrom conans.client.runner import ConanRunner\nfrom conans.client.tools.oss import OSInfo\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\n\n_global_output = None\n\n\nclass SystemPackageTool(object):\n\n def __init__(self, runner=None, os_info=None, tool=None, recommends=False):\n os_info = os_info or OSInfo()\n self._is_up_to_date = False\n self._tool = tool or self._create_tool(os_info)\n self._tool._sudo_str = \"sudo \" if self._is_sudo_enabled() else \"\"\n self._tool._runner = runner or ConanRunner()\n self._tool._recommends = recommends\n\n @staticmethod\n def _is_sudo_enabled():\n if \"CONAN_SYSREQUIRES_SUDO\" not in os.environ:\n if os.name == 'posix' and os.geteuid() == 0:\n return False\n if os.name == 'nt':\n return False\n return get_env(\"CONAN_SYSREQUIRES_SUDO\", True)\n\n @staticmethod\n def _get_sysrequire_mode():\n allowed_modes = (\"enabled\", \"verify\", \"disabled\")\n mode = get_env(\"CONAN_SYSREQUIRES_MODE\", \"enabled\")\n mode_lower = mode.lower()\n if mode_lower not in allowed_modes:\n raise ConanException(\"CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r\" % (mode, allowed_modes))\n return mode_lower\n\n @staticmethod\n def _create_tool(os_info):\n if os_info.with_apt:\n return AptTool()\n elif os_info.with_yum:\n return YumTool()\n elif os_info.with_pacman:\n return PacManTool()\n elif os_info.is_macos:\n return BrewTool()\n elif os_info.is_freebsd:\n return PkgTool()\n elif os_info.is_solaris:\n return PkgUtilTool()\n elif os_info.with_zypper:\n return ZypperTool()\n else:\n return NullTool()\n\n def update(self):\n \"\"\"\n Get the system package tool update command\n \"\"\"\n mode = self._get_sysrequire_mode()\n if mode in (\"disabled\", \"verify\"):\n _global_output.info(\"Not updating system_requirements. 
CONAN_SYSREQUIRES_MODE=%s\" % mode)\n return\n self._is_up_to_date = True\n self._tool.update()\n\n def install(self, packages, update=True, force=False):\n \"\"\"\n Get the system package tool install command.\n '\"\"\"\n packages = [packages] if isinstance(packages, str) else list(packages)\n\n mode = self._get_sysrequire_mode()\n\n if mode in (\"verify\", \"disabled\"):\n # Report to output packages need to be installed\n if mode == \"disabled\":\n _global_output.info(\"The following packages need to be installed:\\n %s\" % \"\\n\".join(packages))\n return\n\n if mode == \"verify\" and not self._installed(packages):\n _global_output.error(\"The following packages need to be installed:\\n %s\" % \"\\n\".join(packages))\n raise ConanException(\n \"Aborted due to CONAN_SYSREQUIRES_MODE=%s. Some system packages need to be installed\" % mode\n )\n\n if not force and self._installed(packages):\n return\n\n # From here system packages can be updated/modified\n if update and not self._is_up_to_date:\n self.update()\n self._install_any(packages)\n\n def _installed(self, packages):\n if not packages:\n return True\n\n for pkg in packages:\n if self._tool.installed(pkg):\n _global_output.info(\"Package already installed: %s\" % pkg)\n return True\n return False\n\n def _install_any(self, packages):\n if len(packages) == 1:\n return self._tool.install(packages[0])\n for pkg in packages:\n try:\n return self._tool.install(pkg)\n except ConanException:\n pass\n raise ConanException(\"Could not install any of %s\" % packages)\n\n\nclass NullTool(object):\n def update(self):\n pass\n\n def install(self, package_name):\n _global_output.warn(\"Only available for linux with apt-get, yum, or pacman or OSX with brew or \"\n \"FreeBSD with pkg or Solaris with pkgutil\")\n\n def installed(self, package_name):\n return False\n\n\nclass AptTool(object):\n def update(self):\n _run(self._runner, \"%sapt-get update\" % self._sudo_str)\n\n def install(self, package_name):\n recommends_str = '' if self._recommends else '--no-install-recommends '\n _run(self._runner, \"%sapt-get install -y %s%s\" % (self._sudo_str, recommends_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"dpkg -s %s\" % package_name, None)\n return exit_code == 0\n\n\nclass YumTool(object):\n def update(self):\n _run(self._runner, \"%syum update\" % self._sudo_str, accepted_returns=[0, 100])\n\n def install(self, package_name):\n _run(self._runner, \"%syum install -y %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\nclass BrewTool(object):\n def update(self):\n _run(self._runner, \"brew update\")\n\n def install(self, package_name):\n _run(self._runner, \"brew install %s\" % package_name)\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"$(brew ls --versions %s)\"' % package_name, None)\n return exit_code == 0\n\n\nclass PkgTool(object):\n def update(self):\n _run(self._runner, \"%spkg update\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spkg install -y %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"pkg info %s\" % package_name, None)\n return exit_code == 0\n\n\nclass PkgUtilTool(object):\n def update(self):\n _run(self._runner, \"%spkgutil --catalog\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spkgutil --install --yes %s\" % 
(self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"`pkgutil --list %s`\"' % package_name, None)\n return exit_code == 0\n\n\nclass ChocolateyTool(object):\n def update(self):\n _run(self._runner, \"choco outdated\")\n\n def install(self, package_name):\n _run(self._runner, \"choco install --yes %s\" % package_name)\n\n def installed(self, package_name):\n exit_code = self._runner('choco search --local-only --exact %s | '\n 'findstr /c:\"1 packages installed.\"' % package_name, None)\n return exit_code == 0\n\n\nclass PacManTool(object):\n def update(self):\n _run(self._runner, \"%spacman -Syyu --noconfirm\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spacman -S --noconfirm %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"pacman -Qi %s\" % package_name, None)\n return exit_code == 0\n\n\nclass ZypperTool(object):\n def update(self):\n _run(self._runner, \"%szypper --non-interactive ref\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%szypper --non-interactive in %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\ndef _run(runner, command, accepted_returns=None):\n accepted_returns = accepted_returns or [0, ]\n _global_output.info(\"Running: %s\" % command)\n if runner(command, True) not in accepted_returns:\n raise ConanException(\"Command '%s' failed\" % command)\n", "path": "conans/client/tools/system_pm.py"}], "after_files": [{"content": "import os\nfrom conans.client.runner import ConanRunner\nfrom conans.client.tools.oss import OSInfo\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\n\n_global_output = None\n\n\nclass SystemPackageTool(object):\n\n def __init__(self, runner=None, os_info=None, tool=None, recommends=False):\n os_info = os_info or OSInfo()\n self._is_up_to_date = False\n self._tool = tool or self._create_tool(os_info)\n self._tool._sudo_str = \"sudo \" if self._is_sudo_enabled() else \"\"\n self._tool._runner = runner or ConanRunner()\n self._tool._recommends = recommends\n\n @staticmethod\n def _is_sudo_enabled():\n if \"CONAN_SYSREQUIRES_SUDO\" not in os.environ:\n if os.name == 'posix' and os.geteuid() == 0:\n return False\n if os.name == 'nt':\n return False\n return get_env(\"CONAN_SYSREQUIRES_SUDO\", True)\n\n @staticmethod\n def _get_sysrequire_mode():\n allowed_modes = (\"enabled\", \"verify\", \"disabled\")\n mode = get_env(\"CONAN_SYSREQUIRES_MODE\", \"enabled\")\n mode_lower = mode.lower()\n if mode_lower not in allowed_modes:\n raise ConanException(\"CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r\" % (mode, allowed_modes))\n return mode_lower\n\n @staticmethod\n def _create_tool(os_info):\n if os_info.with_apt:\n return AptTool()\n elif os_info.with_yum:\n return YumTool()\n elif os_info.with_pacman:\n return PacManTool()\n elif os_info.is_macos:\n return BrewTool()\n elif os_info.is_freebsd:\n return PkgTool()\n elif os_info.is_solaris:\n return PkgUtilTool()\n elif os_info.with_zypper:\n return ZypperTool()\n else:\n return NullTool()\n\n def update(self):\n \"\"\"\n Get the system package tool update command\n \"\"\"\n mode = self._get_sysrequire_mode()\n if mode in (\"disabled\", \"verify\"):\n _global_output.info(\"Not updating system_requirements. 
CONAN_SYSREQUIRES_MODE=%s\" % mode)\n return\n self._is_up_to_date = True\n self._tool.update()\n\n def install(self, packages, update=True, force=False):\n \"\"\"\n Get the system package tool install command.\n '\"\"\"\n packages = [packages] if isinstance(packages, str) else list(packages)\n\n mode = self._get_sysrequire_mode()\n\n if mode in (\"verify\", \"disabled\"):\n # Report to output packages need to be installed\n if mode == \"disabled\":\n _global_output.info(\"The following packages need to be installed:\\n %s\" % \"\\n\".join(packages))\n return\n\n if mode == \"verify\" and not self._installed(packages):\n _global_output.error(\"The following packages need to be installed:\\n %s\" % \"\\n\".join(packages))\n raise ConanException(\n \"Aborted due to CONAN_SYSREQUIRES_MODE=%s. Some system packages need to be installed\" % mode\n )\n\n if not force and self._installed(packages):\n return\n\n # From here system packages can be updated/modified\n if update and not self._is_up_to_date:\n self.update()\n self._install_any(packages)\n\n def _installed(self, packages):\n if not packages:\n return True\n\n for pkg in packages:\n if self._tool.installed(pkg):\n _global_output.info(\"Package already installed: %s\" % pkg)\n return True\n return False\n\n def _install_any(self, packages):\n if len(packages) == 1:\n return self._tool.install(packages[0])\n for pkg in packages:\n try:\n return self._tool.install(pkg)\n except ConanException:\n pass\n raise ConanException(\"Could not install any of %s\" % packages)\n\n\nclass NullTool(object):\n def update(self):\n pass\n\n def install(self, package_name):\n _global_output.warn(\"Only available for linux with apt-get, yum, or pacman or OSX with brew or \"\n \"FreeBSD with pkg or Solaris with pkgutil\")\n\n def installed(self, package_name):\n return False\n\n\nclass AptTool(object):\n def update(self):\n _run(self._runner, \"%sapt-get update\" % self._sudo_str)\n\n def install(self, package_name):\n recommends_str = '' if self._recommends else '--no-install-recommends '\n _run(self._runner, \"%sapt-get install -y %s%s\" % (self._sudo_str, recommends_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"dpkg-query -W -f='${Status}' %s | grep -q \\\"ok installed\\\"\" % package_name, None)\n return exit_code == 0\n\n\nclass YumTool(object):\n def update(self):\n _run(self._runner, \"%syum update\" % self._sudo_str, accepted_returns=[0, 100])\n\n def install(self, package_name):\n _run(self._runner, \"%syum install -y %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\nclass BrewTool(object):\n def update(self):\n _run(self._runner, \"brew update\")\n\n def install(self, package_name):\n _run(self._runner, \"brew install %s\" % package_name)\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"$(brew ls --versions %s)\"' % package_name, None)\n return exit_code == 0\n\n\nclass PkgTool(object):\n def update(self):\n _run(self._runner, \"%spkg update\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spkg install -y %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"pkg info %s\" % package_name, None)\n return exit_code == 0\n\n\nclass PkgUtilTool(object):\n def update(self):\n _run(self._runner, \"%spkgutil --catalog\" % self._sudo_str)\n\n def install(self, package_name):\n 
_run(self._runner, \"%spkgutil --install --yes %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"`pkgutil --list %s`\"' % package_name, None)\n return exit_code == 0\n\n\nclass ChocolateyTool(object):\n def update(self):\n _run(self._runner, \"choco outdated\")\n\n def install(self, package_name):\n _run(self._runner, \"choco install --yes %s\" % package_name)\n\n def installed(self, package_name):\n exit_code = self._runner('choco search --local-only --exact %s | '\n 'findstr /c:\"1 packages installed.\"' % package_name, None)\n return exit_code == 0\n\n\nclass PacManTool(object):\n def update(self):\n _run(self._runner, \"%spacman -Syyu --noconfirm\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spacman -S --noconfirm %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"pacman -Qi %s\" % package_name, None)\n return exit_code == 0\n\n\nclass ZypperTool(object):\n def update(self):\n _run(self._runner, \"%szypper --non-interactive ref\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%szypper --non-interactive in %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\ndef _run(runner, command, accepted_returns=None):\n accepted_returns = accepted_returns or [0, ]\n _global_output.info(\"Running: %s\" % command)\n if runner(command, True) not in accepted_returns:\n raise ConanException(\"Command '%s' failed\" % command)\n", "path": "conans/client/tools/system_pm.py"}]} | 3,971 | 159 |
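
Editor's note on the row above: the fix replaces `dpkg -s` with a `dpkg-query`/`grep` pipeline whose exit code is 0 only when the package status contains "ok installed", which is exactly the check suggested in the issue. Below is a minimal standalone sketch of that check; the `subprocess` wrapper and the example package name are assumptions for illustration and are not part of Conan's runner API.

```python
# Illustration only: standalone version of the check the patch adopts.
# `dpkg -s` can still exit 0 for a removed package whose config files remain
# ("Status: deinstall ok config-files"), while this pipeline succeeds only
# when the status line contains "ok installed".
import subprocess

def apt_package_installed(package_name: str) -> bool:
    command = (
        "dpkg-query -W -f='${Status}' %s | grep -q \"ok installed\"" % package_name
    )
    return subprocess.call(command, shell=True) == 0

if __name__ == "__main__":
    print(apt_package_installed("libudev-dev"))  # example package from the issue
```
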
gh_patches_debug_33625 | rasdani/github-patches | git_diff | robocorp__rpaframework-971 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RPA.Browser.Selenium fails in cloud workers if download=TRUE
2023-05-08 12:47:36: ==============================================================================
2023-05-08 12:47:36: Tasks
2023-05-08 12:47:36: ==============================================================================
2023-05-08 12:47:46: Minimal task | FAIL |
2023-05-08 12:47:46: BrowserNotFoundError: Failed to start a browser:
2023-05-08 12:47:46: - Chrome: Message: session not created: This version of ChromeDriver only supports Chrome version 113
2023-05-08 12:47:46: Current browser version is 105.0.5195.102 with binary path /usr/bin/chromium-browser
2023-05-08 12:47:46: Stacktrace:
2023-05-08 12:47:46: #0 0x55e4a398c133 <unknown>
2023-05-08 12:47:46: #1 0x55e4a36c0966 <unknown>
2023-05-08 12:47:46: #2 0x55e4a36ee5ec <unknown>
2023-05-08 12:47:46: #3 0x55e4a36e98da <unknown>
2023-05-08 12:47:46: #4 0x55e4a36e607b <unknown>
2023-05-08 12:47:46: #5 0x55e4a37259ad <unknown>
2023-05-08 12:47:46: #6 0x55e4a372518f <unknown>
2023-05-08 12:47:46: #7 0x55e4a371c9a3 <unknown>
2023-05-08 12:47:46: #8 0x55e4a36f146a <unknown>
2023-05-08 12:47:46: #9 0x55e4a36f255e <unknown>
2023-05-08 12:47:46: #10 0x55e4a394bcae <unknown>
2023-05-08 12:47:46: #11 0x55e4a394f8fe <unknown>
2023-05-08 12:47:46: #12 0x55e4a3958f20 <unknown>
2023-05-08 12:47:46: #13 0x55e4a3950923 <unknown>
2023-05-08 12:47:46: [ Message content over the limit has been removed. ]
2023-05-08 12:47:46: #0 0x55c657673273 <unknown>
2023-05-08 12:47:46: #1 0x55c65738e7a1 <unknown>
2023-05-08 12:47:46: #2 0x55c6573babeb <unknown>
2023-05-08 12:47:46: #3 0x55c6573b5fdc <unknown>
2023-05-08 12:47:46: #4 0x55c6573f7695 <unknown>
2023-05-08 12:47:46: #5 0x55c6573ee723 <unknown>
2023-05-08 12:47:46: #6 0x55c6573c17d1 <unknown>
2023-05-08 12:47:46: #7 0x55c6573c2a0e <unknown>
2023-05-08 12:47:46: #8 0x55c657641390 <unknown>
2023-05-08 12:47:46: #9 0x55c657643a9e <unknown>
2023-05-08 12:47:46: #10 0x55c6576434b9 <unknown>
2023-05-08 12:47:46: #11 0x55c6576441a5 <unknown>
2023-05-08 12:47:46: #12 0x55c65764ae0b <unknown>
2023-05-08 12:47:46: #13 0x55c65764456e <unknown>
2023-05-08 12:47:46: #14 0x55c657625373 <unknown>
2023-05-08 12:47:46: #15 0x55c65765fc58 <unknown>
2023-05-08 12:47:46: #16 0x55c65765fd94 <unknown>
2023-05-08 12:47:46: #17 0x55c65766d5c6 <unknown>
2023-05-08 12:47:46: #18 0x7f8de95b36db start_thread
2023-05-08 12:47:46: ------------------------------------------------------------------------------
2023-05-08 12:47:46: Tasks | FAIL |
2023-05-08 12:47:46: 1 task, 0 passed, 1 failed
2023-05-08 12:47:46: ==============================================================================
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/core/src/RPA/core/webdriver.py`
Content:
```
1 import contextlib
2 import logging
3 import os
4 import platform
5 import stat
6 from pathlib import Path
7 from typing import List, Optional
8
9 import requests
10 from requests import Response
11 from selenium import webdriver
12 from selenium.webdriver.common.service import Service
13 from selenium.webdriver.remote.webdriver import WebDriver
14 from webdriver_manager.chrome import ChromeDriverManager
15 from webdriver_manager.core.download_manager import WDMDownloadManager
16 from webdriver_manager.core.http import WDMHttpClient
17 from webdriver_manager.core.manager import DriverManager
18 from webdriver_manager.core.utils import os_name as get_os_name
19 from webdriver_manager.firefox import GeckoDriverManager
20 from webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager
21 from webdriver_manager.opera import OperaDriverManager
22
23 from RPA.core.robocorp import robocorp_home
24
25
26 LOGGER = logging.getLogger(__name__)
27
28 DRIVER_ROOT = robocorp_home() / "webdrivers"
29 AVAILABLE_DRIVERS = {
30 # Driver names taken from `webdrivermanager` and adapted to `webdriver_manager`.
31 "chrome": ChromeDriverManager,
32 "firefox": GeckoDriverManager,
33 "gecko": GeckoDriverManager,
34 "mozilla": GeckoDriverManager,
35 # NOTE: Selenium 4 dropped support for Opera.
36 # (https://github.com/SeleniumHQ/selenium/issues/10835)
37 "opera": OperaDriverManager,
38 # NOTE: In Selenium 4 `Edge` is the same with `ChromiumEdge`.
39 "edge": EdgeChromiumDriverManager,
40 "chromiumedge": EdgeChromiumDriverManager,
41 # NOTE: IE is discontinued and not supported/encouraged anymore.
42 "ie": IEDriverManager,
43 }
44 # Available `WebDriver` classes in Selenium.
45 SUPPORTED_BROWSERS = dict(
46 {name: name.capitalize() for name in AVAILABLE_DRIVERS},
47 **{"chromiumedge": "ChromiumEdge"},
48 )
49 _DRIVER_PREFERENCE = {
50 "Windows": ["Chrome", "Firefox", "Edge"],
51 "Linux": ["Chrome", "Firefox", "Edge"],
52 "Darwin": ["Chrome", "Firefox", "Edge", "Safari"],
53 "default": ["Chrome", "Firefox"],
54 }
55
56
57 def _get_browser_order_from_env() -> Optional[List[str]]:
58 browsers: str = os.getenv("RPA_SELENIUM_BROWSER_ORDER", "")
59 if browsers:
60 return [browser.strip() for browser in browsers.split(sep=",")]
61
62 return None # meaning there's no env var to control the order
63
64
65 def get_browser_order() -> List[str]:
66 """Get a list of preferred browsers based on the environment variable
67 `RPA_SELENIUM_BROWSER_ORDER` if set.
68
69 The OS dictates the order if no such env var is set.
70 """
71 browsers: Optional[List[str]] = _get_browser_order_from_env()
72 if browsers:
73 return browsers
74
75 return _DRIVER_PREFERENCE.get(platform.system(), _DRIVER_PREFERENCE["default"])
76
77
78 class Downloader(WDMHttpClient):
79
80 """Custom downloader which disables download progress reporting."""
81
82 def get(self, url, **kwargs) -> Response:
83 resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)
84 self.validate_response(resp)
85 return resp
86
87
88 @contextlib.contextmanager
89 def suppress_logging():
90 """Suppress webdriver-manager logging."""
91 wdm_log = "WDM_LOG"
92 original_value = os.getenv(wdm_log, "")
93 try:
94 os.environ[wdm_log] = str(logging.NOTSET)
95 yield
96 finally:
97 os.environ[wdm_log] = original_value
98
99
100 def start(browser: str, service: Optional[Service] = None, **options) -> WebDriver:
101 """Start a webdriver with the given options."""
102 browser = browser.strip()
103 webdriver_factory = getattr(webdriver, browser, None)
104 if not webdriver_factory:
105 raise ValueError(f"Unsupported Selenium browser: {browser}")
106
107 # NOTE: It is recommended to pass a `service` rather than deprecated `options`.
108 driver = webdriver_factory(service=service, **options)
109 return driver
110
111
112 def _to_manager(browser: str, *, root: Path) -> DriverManager:
113 browser = browser.strip()
114 manager_factory = AVAILABLE_DRIVERS.get(browser.lower())
115 if not manager_factory:
116 raise ValueError(
117 f"Unsupported browser {browser!r} for webdriver download!"
118 f" (choose from: {', '.join(SUPPORTED_BROWSERS.values())})"
119 )
120
121 downloader = Downloader()
122 download_manager = WDMDownloadManager(downloader)
123 manager = manager_factory(path=str(root), download_manager=download_manager)
124 return manager
125
126
127 def _set_executable(path: str) -> None:
128 st = os.stat(path)
129 os.chmod(
130 path,
131 st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC,
132 )
133
134
135 def download(browser: str, root: Path = DRIVER_ROOT) -> Optional[str]:
136 """Download a webdriver binary for the given browser and return the path to it."""
137 manager = _to_manager(browser, root=root)
138 driver = manager.driver
139 resolved_os = getattr(driver, "os_type", driver.get_os_type())
140 os_name = get_os_name()
141 if os_name.lower() not in resolved_os.lower():
142 LOGGER.warning(
143 "Attempting to download incompatible driver for OS %r on OS %r! Skip",
144 resolved_os,
145 os_name,
146 )
147 return None # incompatible driver download attempt
148
149 with suppress_logging():
150 path: str = manager.install()
151 if platform.system() != "Windows":
152 _set_executable(path)
153 LOGGER.info("Downloaded webdriver to: %s", path)
154 return path
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/core/src/RPA/core/webdriver.py b/packages/core/src/RPA/core/webdriver.py
--- a/packages/core/src/RPA/core/webdriver.py
+++ b/packages/core/src/RPA/core/webdriver.py
@@ -1,4 +1,5 @@
import contextlib
+import functools
import logging
import os
import platform
@@ -15,6 +16,7 @@
from webdriver_manager.core.download_manager import WDMDownloadManager
from webdriver_manager.core.http import WDMHttpClient
from webdriver_manager.core.manager import DriverManager
+from webdriver_manager.core.utils import ChromeType, get_browser_version_from_os
from webdriver_manager.core.utils import os_name as get_os_name
from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager
@@ -109,6 +111,14 @@
return driver
+def _is_chromium() -> bool:
+ """Detects if Chromium is used instead of Chrome no matter the platform."""
+ is_browser = lambda browser_type: bool( # noqa: E731
+ get_browser_version_from_os(browser_type)
+ )
+ return not is_browser(ChromeType.GOOGLE) and is_browser(ChromeType.CHROMIUM)
+
+
def _to_manager(browser: str, *, root: Path) -> DriverManager:
browser = browser.strip()
manager_factory = AVAILABLE_DRIVERS.get(browser.lower())
@@ -118,6 +128,10 @@
f" (choose from: {', '.join(SUPPORTED_BROWSERS.values())})"
)
+ if manager_factory == ChromeDriverManager and _is_chromium():
+ manager_factory = functools.partial(
+ manager_factory, chrome_type=ChromeType.CHROMIUM
+ )
downloader = Downloader()
download_manager = WDMDownloadManager(downloader)
manager = manager_factory(path=str(root), download_manager=download_manager)
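For illustration, a minimal sketch of how the `_is_chromium()` detection introduced in the patch above could be exercised on its own. The `ChromeType` constants and the `get_browser_version_from_os` helper are assumed to behave as imported in that diff, which may vary between `webdriver_manager` releases:

```python
# Sketch only: probe which Chrome flavour is installed, mirroring the
# _is_chromium() helper added by the patch above.
from webdriver_manager.core.utils import ChromeType, get_browser_version_from_os


def detect_chrome_type():
    """Return the ChromeType constant matching the locally installed browser."""
    if get_browser_version_from_os(ChromeType.GOOGLE):
        return ChromeType.GOOGLE
    if get_browser_version_from_os(ChromeType.CHROMIUM):
        return ChromeType.CHROMIUM
    raise RuntimeError("Neither Chrome nor Chromium was found on this machine")


if __name__ == "__main__":
    # On a worker that only ships /usr/bin/chromium-browser this should report
    # the Chromium type, which is what makes the manager fetch a matching
    # chromedriver instead of the latest Google Chrome build.
    print("Detected browser type:", detect_chrome_type())
```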
| {"golden_diff": "diff --git a/packages/core/src/RPA/core/webdriver.py b/packages/core/src/RPA/core/webdriver.py\n--- a/packages/core/src/RPA/core/webdriver.py\n+++ b/packages/core/src/RPA/core/webdriver.py\n@@ -1,4 +1,5 @@\n import contextlib\n+import functools\n import logging\n import os\n import platform\n@@ -15,6 +16,7 @@\n from webdriver_manager.core.download_manager import WDMDownloadManager\n from webdriver_manager.core.http import WDMHttpClient\n from webdriver_manager.core.manager import DriverManager\n+from webdriver_manager.core.utils import ChromeType, get_browser_version_from_os\n from webdriver_manager.core.utils import os_name as get_os_name\n from webdriver_manager.firefox import GeckoDriverManager\n from webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager\n@@ -109,6 +111,14 @@\n return driver\n \n \n+def _is_chromium() -> bool:\n+ \"\"\"Detects if Chromium is used instead of Chrome no matter the platform.\"\"\"\n+ is_browser = lambda browser_type: bool( # noqa: E731\n+ get_browser_version_from_os(browser_type)\n+ )\n+ return not is_browser(ChromeType.GOOGLE) and is_browser(ChromeType.CHROMIUM)\n+\n+\n def _to_manager(browser: str, *, root: Path) -> DriverManager:\n browser = browser.strip()\n manager_factory = AVAILABLE_DRIVERS.get(browser.lower())\n@@ -118,6 +128,10 @@\n f\" (choose from: {', '.join(SUPPORTED_BROWSERS.values())})\"\n )\n \n+ if manager_factory == ChromeDriverManager and _is_chromium():\n+ manager_factory = functools.partial(\n+ manager_factory, chrome_type=ChromeType.CHROMIUM\n+ )\n downloader = Downloader()\n download_manager = WDMDownloadManager(downloader)\n manager = manager_factory(path=str(root), download_manager=download_manager)\n", "issue": "RPA.Browser.Selenium fails in cloud workers if download=TRUE\n2023-05-08 12:47:36: ==============================================================================\r\n2023-05-08 12:47:36: Tasks \r\n2023-05-08 12:47:36: ==============================================================================\r\n2023-05-08 12:47:46: Minimal task | FAIL |\r\n2023-05-08 12:47:46: BrowserNotFoundError: Failed to start a browser:\r\n2023-05-08 12:47:46: - Chrome: Message: session not created: This version of ChromeDriver only supports Chrome version 113\r\n2023-05-08 12:47:46: Current browser version is 105.0.5195.102 with binary path /usr/bin/chromium-browser\r\n2023-05-08 12:47:46: Stacktrace:\r\n2023-05-08 12:47:46: #0 0x55e4a398c133 <unknown>\r\n2023-05-08 12:47:46: #1 0x55e4a36c0966 <unknown>\r\n2023-05-08 12:47:46: #2 0x55e4a36ee5ec <unknown>\r\n2023-05-08 12:47:46: #3 0x55e4a36e98da <unknown>\r\n2023-05-08 12:47:46: #4 0x55e4a36e607b <unknown>\r\n2023-05-08 12:47:46: #5 0x55e4a37259ad <unknown>\r\n2023-05-08 12:47:46: #6 0x55e4a372518f <unknown>\r\n2023-05-08 12:47:46: #7 0x55e4a371c9a3 <unknown>\r\n2023-05-08 12:47:46: #8 0x55e4a36f146a <unknown>\r\n2023-05-08 12:47:46: #9 0x55e4a36f255e <unknown>\r\n2023-05-08 12:47:46: #10 0x55e4a394bcae <unknown>\r\n2023-05-08 12:47:46: #11 0x55e4a394f8fe <unknown>\r\n2023-05-08 12:47:46: #12 0x55e4a3958f20 <unknown>\r\n2023-05-08 12:47:46: #13 0x55e4a3950923 <unknown>\r\n2023-05-08 12:47:46: [ Message content over the limit has been removed. 
]\r\n2023-05-08 12:47:46: #0 0x55c657673273 <unknown>\r\n2023-05-08 12:47:46: #1 0x55c65738e7a1 <unknown>\r\n2023-05-08 12:47:46: #2 0x55c6573babeb <unknown>\r\n2023-05-08 12:47:46: #3 0x55c6573b5fdc <unknown>\r\n2023-05-08 12:47:46: #4 0x55c6573f7695 <unknown>\r\n2023-05-08 12:47:46: #5 0x55c6573ee723 <unknown>\r\n2023-05-08 12:47:46: #6 0x55c6573c17d1 <unknown>\r\n2023-05-08 12:47:46: #7 0x55c6573c2a0e <unknown>\r\n2023-05-08 12:47:46: #8 0x55c657641390 <unknown>\r\n2023-05-08 12:47:46: #9 0x55c657643a9e <unknown>\r\n2023-05-08 12:47:46: #10 0x55c6576434b9 <unknown>\r\n2023-05-08 12:47:46: #11 0x55c6576441a5 <unknown>\r\n2023-05-08 12:47:46: #12 0x55c65764ae0b <unknown>\r\n2023-05-08 12:47:46: #13 0x55c65764456e <unknown>\r\n2023-05-08 12:47:46: #14 0x55c657625373 <unknown>\r\n2023-05-08 12:47:46: #15 0x55c65765fc58 <unknown>\r\n2023-05-08 12:47:46: #16 0x55c65765fd94 <unknown>\r\n2023-05-08 12:47:46: #17 0x55c65766d5c6 <unknown>\r\n2023-05-08 12:47:46: #18 0x7f8de95b36db start_thread\r\n2023-05-08 12:47:46: ------------------------------------------------------------------------------\r\n2023-05-08 12:47:46: Tasks | FAIL |\r\n2023-05-08 12:47:46: 1 task, 0 passed, 1 failed\r\n2023-05-08 12:47:46: ==============================================================================\n", "before_files": [{"content": "import contextlib\nimport logging\nimport os\nimport platform\nimport stat\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport requests\nfrom requests import Response\nfrom selenium import webdriver\nfrom selenium.webdriver.common.service import Service\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.core.download_manager import WDMDownloadManager\nfrom webdriver_manager.core.http import WDMHttpClient\nfrom webdriver_manager.core.manager import DriverManager\nfrom webdriver_manager.core.utils import os_name as get_os_name\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager\nfrom webdriver_manager.opera import OperaDriverManager\n\nfrom RPA.core.robocorp import robocorp_home\n\n\nLOGGER = logging.getLogger(__name__)\n\nDRIVER_ROOT = robocorp_home() / \"webdrivers\"\nAVAILABLE_DRIVERS = {\n # Driver names taken from `webdrivermanager` and adapted to `webdriver_manager`.\n \"chrome\": ChromeDriverManager,\n \"firefox\": GeckoDriverManager,\n \"gecko\": GeckoDriverManager,\n \"mozilla\": GeckoDriverManager,\n # NOTE: Selenium 4 dropped support for Opera.\n # (https://github.com/SeleniumHQ/selenium/issues/10835)\n \"opera\": OperaDriverManager,\n # NOTE: In Selenium 4 `Edge` is the same with `ChromiumEdge`.\n \"edge\": EdgeChromiumDriverManager,\n \"chromiumedge\": EdgeChromiumDriverManager,\n # NOTE: IE is discontinued and not supported/encouraged anymore.\n \"ie\": IEDriverManager,\n}\n# Available `WebDriver` classes in Selenium.\nSUPPORTED_BROWSERS = dict(\n {name: name.capitalize() for name in AVAILABLE_DRIVERS},\n **{\"chromiumedge\": \"ChromiumEdge\"},\n)\n_DRIVER_PREFERENCE = {\n \"Windows\": [\"Chrome\", \"Firefox\", \"Edge\"],\n \"Linux\": [\"Chrome\", \"Firefox\", \"Edge\"],\n \"Darwin\": [\"Chrome\", \"Firefox\", \"Edge\", \"Safari\"],\n \"default\": [\"Chrome\", \"Firefox\"],\n}\n\n\ndef _get_browser_order_from_env() -> Optional[List[str]]:\n browsers: str = os.getenv(\"RPA_SELENIUM_BROWSER_ORDER\", \"\")\n if browsers:\n return [browser.strip() for browser in 
browsers.split(sep=\",\")]\n\n return None # meaning there's no env var to control the order\n\n\ndef get_browser_order() -> List[str]:\n \"\"\"Get a list of preferred browsers based on the environment variable\n `RPA_SELENIUM_BROWSER_ORDER` if set.\n\n The OS dictates the order if no such env var is set.\n \"\"\"\n browsers: Optional[List[str]] = _get_browser_order_from_env()\n if browsers:\n return browsers\n\n return _DRIVER_PREFERENCE.get(platform.system(), _DRIVER_PREFERENCE[\"default\"])\n\n\nclass Downloader(WDMHttpClient):\n\n \"\"\"Custom downloader which disables download progress reporting.\"\"\"\n\n def get(self, url, **kwargs) -> Response:\n resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)\n self.validate_response(resp)\n return resp\n\n\[email protected]\ndef suppress_logging():\n \"\"\"Suppress webdriver-manager logging.\"\"\"\n wdm_log = \"WDM_LOG\"\n original_value = os.getenv(wdm_log, \"\")\n try:\n os.environ[wdm_log] = str(logging.NOTSET)\n yield\n finally:\n os.environ[wdm_log] = original_value\n\n\ndef start(browser: str, service: Optional[Service] = None, **options) -> WebDriver:\n \"\"\"Start a webdriver with the given options.\"\"\"\n browser = browser.strip()\n webdriver_factory = getattr(webdriver, browser, None)\n if not webdriver_factory:\n raise ValueError(f\"Unsupported Selenium browser: {browser}\")\n\n # NOTE: It is recommended to pass a `service` rather than deprecated `options`.\n driver = webdriver_factory(service=service, **options)\n return driver\n\n\ndef _to_manager(browser: str, *, root: Path) -> DriverManager:\n browser = browser.strip()\n manager_factory = AVAILABLE_DRIVERS.get(browser.lower())\n if not manager_factory:\n raise ValueError(\n f\"Unsupported browser {browser!r} for webdriver download!\"\n f\" (choose from: {', '.join(SUPPORTED_BROWSERS.values())})\"\n )\n\n downloader = Downloader()\n download_manager = WDMDownloadManager(downloader)\n manager = manager_factory(path=str(root), download_manager=download_manager)\n return manager\n\n\ndef _set_executable(path: str) -> None:\n st = os.stat(path)\n os.chmod(\n path,\n st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC,\n )\n\n\ndef download(browser: str, root: Path = DRIVER_ROOT) -> Optional[str]:\n \"\"\"Download a webdriver binary for the given browser and return the path to it.\"\"\"\n manager = _to_manager(browser, root=root)\n driver = manager.driver\n resolved_os = getattr(driver, \"os_type\", driver.get_os_type())\n os_name = get_os_name()\n if os_name.lower() not in resolved_os.lower():\n LOGGER.warning(\n \"Attempting to download incompatible driver for OS %r on OS %r! 
Skip\",\n resolved_os,\n os_name,\n )\n return None # incompatible driver download attempt\n\n with suppress_logging():\n path: str = manager.install()\n if platform.system() != \"Windows\":\n _set_executable(path)\n LOGGER.info(\"Downloaded webdriver to: %s\", path)\n return path\n", "path": "packages/core/src/RPA/core/webdriver.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport logging\nimport os\nimport platform\nimport stat\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport requests\nfrom requests import Response\nfrom selenium import webdriver\nfrom selenium.webdriver.common.service import Service\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.core.download_manager import WDMDownloadManager\nfrom webdriver_manager.core.http import WDMHttpClient\nfrom webdriver_manager.core.manager import DriverManager\nfrom webdriver_manager.core.utils import ChromeType, get_browser_version_from_os\nfrom webdriver_manager.core.utils import os_name as get_os_name\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager\nfrom webdriver_manager.opera import OperaDriverManager\n\nfrom RPA.core.robocorp import robocorp_home\n\n\nLOGGER = logging.getLogger(__name__)\n\nDRIVER_ROOT = robocorp_home() / \"webdrivers\"\nAVAILABLE_DRIVERS = {\n # Driver names taken from `webdrivermanager` and adapted to `webdriver_manager`.\n \"chrome\": ChromeDriverManager,\n \"firefox\": GeckoDriverManager,\n \"gecko\": GeckoDriverManager,\n \"mozilla\": GeckoDriverManager,\n # NOTE: Selenium 4 dropped support for Opera.\n # (https://github.com/SeleniumHQ/selenium/issues/10835)\n \"opera\": OperaDriverManager,\n # NOTE: In Selenium 4 `Edge` is the same with `ChromiumEdge`.\n \"edge\": EdgeChromiumDriverManager,\n \"chromiumedge\": EdgeChromiumDriverManager,\n # NOTE: IE is discontinued and not supported/encouraged anymore.\n \"ie\": IEDriverManager,\n}\n# Available `WebDriver` classes in Selenium.\nSUPPORTED_BROWSERS = dict(\n {name: name.capitalize() for name in AVAILABLE_DRIVERS},\n **{\"chromiumedge\": \"ChromiumEdge\"},\n)\n_DRIVER_PREFERENCE = {\n \"Windows\": [\"Chrome\", \"Firefox\", \"Edge\"],\n \"Linux\": [\"Chrome\", \"Firefox\", \"Edge\"],\n \"Darwin\": [\"Chrome\", \"Firefox\", \"Edge\", \"Safari\"],\n \"default\": [\"Chrome\", \"Firefox\"],\n}\n\n\ndef _get_browser_order_from_env() -> Optional[List[str]]:\n browsers: str = os.getenv(\"RPA_SELENIUM_BROWSER_ORDER\", \"\")\n if browsers:\n return [browser.strip() for browser in browsers.split(sep=\",\")]\n\n return None # meaning there's no env var to control the order\n\n\ndef get_browser_order() -> List[str]:\n \"\"\"Get a list of preferred browsers based on the environment variable\n `RPA_SELENIUM_BROWSER_ORDER` if set.\n\n The OS dictates the order if no such env var is set.\n \"\"\"\n browsers: Optional[List[str]] = _get_browser_order_from_env()\n if browsers:\n return browsers\n\n return _DRIVER_PREFERENCE.get(platform.system(), _DRIVER_PREFERENCE[\"default\"])\n\n\nclass Downloader(WDMHttpClient):\n\n \"\"\"Custom downloader which disables download progress reporting.\"\"\"\n\n def get(self, url, **kwargs) -> Response:\n resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)\n self.validate_response(resp)\n return resp\n\n\[email protected]\ndef suppress_logging():\n \"\"\"Suppress webdriver-manager 
logging.\"\"\"\n wdm_log = \"WDM_LOG\"\n original_value = os.getenv(wdm_log, \"\")\n try:\n os.environ[wdm_log] = str(logging.NOTSET)\n yield\n finally:\n os.environ[wdm_log] = original_value\n\n\ndef start(browser: str, service: Optional[Service] = None, **options) -> WebDriver:\n \"\"\"Start a webdriver with the given options.\"\"\"\n browser = browser.strip()\n webdriver_factory = getattr(webdriver, browser, None)\n if not webdriver_factory:\n raise ValueError(f\"Unsupported Selenium browser: {browser}\")\n\n # NOTE: It is recommended to pass a `service` rather than deprecated `options`.\n driver = webdriver_factory(service=service, **options)\n return driver\n\n\ndef _is_chromium() -> bool:\n \"\"\"Detects if Chromium is used instead of Chrome no matter the platform.\"\"\"\n is_browser = lambda browser_type: bool( # noqa: E731\n get_browser_version_from_os(browser_type)\n )\n return not is_browser(ChromeType.GOOGLE) and is_browser(ChromeType.CHROMIUM)\n\n\ndef _to_manager(browser: str, *, root: Path) -> DriverManager:\n browser = browser.strip()\n manager_factory = AVAILABLE_DRIVERS.get(browser.lower())\n if not manager_factory:\n raise ValueError(\n f\"Unsupported browser {browser!r} for webdriver download!\"\n f\" (choose from: {', '.join(SUPPORTED_BROWSERS.values())})\"\n )\n\n if manager_factory == ChromeDriverManager and _is_chromium():\n manager_factory = functools.partial(\n manager_factory, chrome_type=ChromeType.CHROMIUM\n )\n downloader = Downloader()\n download_manager = WDMDownloadManager(downloader)\n manager = manager_factory(path=str(root), download_manager=download_manager)\n return manager\n\n\ndef _set_executable(path: str) -> None:\n st = os.stat(path)\n os.chmod(\n path,\n st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC,\n )\n\n\ndef download(browser: str, root: Path = DRIVER_ROOT) -> Optional[str]:\n \"\"\"Download a webdriver binary for the given browser and return the path to it.\"\"\"\n manager = _to_manager(browser, root=root)\n driver = manager.driver\n resolved_os = getattr(driver, \"os_type\", driver.get_os_type())\n os_name = get_os_name()\n if os_name.lower() not in resolved_os.lower():\n LOGGER.warning(\n \"Attempting to download incompatible driver for OS %r on OS %r! Skip\",\n resolved_os,\n os_name,\n )\n return None # incompatible driver download attempt\n\n with suppress_logging():\n path: str = manager.install()\n if platform.system() != \"Windows\":\n _set_executable(path)\n LOGGER.info(\"Downloaded webdriver to: %s\", path)\n return path\n", "path": "packages/core/src/RPA/core/webdriver.py"}]} | 3,583 | 420 |
gh_patches_debug_63280 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1113 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rust hook requires `--path` attribute
Cargo has changed how packages get installed and requires an extra `--path <destination>` attribute.
Symptom:
```
[INFO] Initializing environment for https://github.com/nix-community/nixpkgs-fmt.
[INFO] Installing environment for https://github.com/nix-community/nixpkgs-fmt.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: Command: ('/nix/store/fcc3x8zwq1c0667xjs7bkn6ay8j4fdpz-rust-1.38.0-nightly-2019-08-07-ad7c55e1f/bin/cargo', 'install', '--bins', '--root', '/home/zimbatm/.cache/pre-commit/repoeft6xm6t/rustenv-default')
Return code: 101
Expected return code: 0
Output: (none)
Errors:
error: Using `cargo install` to install the binaries for the package in current working directory is no longer supported, use `cargo install --path .` instead. Use `cargo build` if you want to simply build the package.
```
I guess the fix should be done somewhere around here: https://github.com/pre-commit/pre-commit/blob/9c6a1d80d6b94c86a1785a40a51389e83accac3e/pre_commit/languages/rust.py#L87
Do we want to make pre-commit compatible with multiple versions of cargo or just the latest one?
/cc @asottile @chriskuehl
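For illustration, a minimal, self-contained sketch of the `--path .` form the error message asks for. It mirrors the `packages_to_install` logic in `rust.py` but is not the actual pre-commit source, and the dependency spec in the example is made up:

```python
# Sketch only: build the argument tuples handed to `cargo install`, using
# `--path .` for the local package as newer cargo requires.
from typing import Set, Tuple


def cargo_install_args(cli_deps: Set[str]) -> Set[Tuple[str, ...]]:
    packages: Set[Tuple[str, ...]] = {("--path", ".")}  # was `{()}` in the current code
    for cli_dep in cli_deps:
        name, _, version = cli_dep[len("cli:"):].partition(":")
        packages.add((name, "--version", version) if version else (name,))
    return packages


# Each tuple is then spliced into: cargo install --bins --root <envdir> <args...>
print(cargo_install_args({"cli:some-tool:0.1.0"}))  # hypothetical dependency spec
```

Whether older cargo releases also accept `--path .` would still need to be checked if we want to stay compatible with more than the latest version.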
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/rust.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import os.path
5
6 import toml
7
8 import pre_commit.constants as C
9 from pre_commit.envcontext import envcontext
10 from pre_commit.envcontext import Var
11 from pre_commit.languages import helpers
12 from pre_commit.util import clean_path_on_failure
13 from pre_commit.util import cmd_output
14
15
16 ENVIRONMENT_DIR = 'rustenv'
17 get_default_version = helpers.basic_get_default_version
18 healthy = helpers.basic_healthy
19
20
21 def get_env_patch(target_dir):
22 return (
23 (
24 'PATH',
25 (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),
26 ),
27 )
28
29
30 @contextlib.contextmanager
31 def in_env(prefix):
32 target_dir = prefix.path(
33 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
34 )
35 with envcontext(get_env_patch(target_dir)):
36 yield
37
38
39 def _add_dependencies(cargo_toml_path, additional_dependencies):
40 with open(cargo_toml_path, 'r+') as f:
41 cargo_toml = toml.load(f)
42 cargo_toml.setdefault('dependencies', {})
43 for dep in additional_dependencies:
44 name, _, spec = dep.partition(':')
45 cargo_toml['dependencies'][name] = spec or '*'
46 f.seek(0)
47 toml.dump(cargo_toml, f)
48 f.truncate()
49
50
51 def install_environment(prefix, version, additional_dependencies):
52 helpers.assert_version_default('rust', version)
53 directory = prefix.path(
54 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
55 )
56
57 # There are two cases where we might want to specify more dependencies:
58 # as dependencies for the library being built, and as binary packages
59 # to be `cargo install`'d.
60 #
61 # Unlike e.g. Python, if we just `cargo install` a library, it won't be
62 # used for compilation. And if we add a crate providing a binary to the
63 # `Cargo.toml`, the binary won't be built.
64 #
65 # Because of this, we allow specifying "cli" dependencies by prefixing
66 # with 'cli:'.
67 cli_deps = {
68 dep for dep in additional_dependencies if dep.startswith('cli:')
69 }
70 lib_deps = set(additional_dependencies) - cli_deps
71
72 if len(lib_deps) > 0:
73 _add_dependencies(prefix.path('Cargo.toml'), lib_deps)
74
75 with clean_path_on_failure(directory):
76 packages_to_install = {()}
77 for cli_dep in cli_deps:
78 cli_dep = cli_dep[len('cli:'):]
79 package, _, version = cli_dep.partition(':')
80 if version != '':
81 packages_to_install.add((package, '--version', version))
82 else:
83 packages_to_install.add((package,))
84
85 for package in packages_to_install:
86 cmd_output(
87 'cargo', 'install', '--bins', '--root', directory, *package,
88 cwd=prefix.prefix_dir
89 )
90
91
92 def run_hook(hook, file_args):
93 with in_env(hook.prefix):
94 return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/rust.py b/pre_commit/languages/rust.py
--- a/pre_commit/languages/rust.py
+++ b/pre_commit/languages/rust.py
@@ -73,7 +73,7 @@
_add_dependencies(prefix.path('Cargo.toml'), lib_deps)
with clean_path_on_failure(directory):
- packages_to_install = {()}
+ packages_to_install = {('--path', '.')}
for cli_dep in cli_deps:
cli_dep = cli_dep[len('cli:'):]
package, _, version = cli_dep.partition(':')
| {"golden_diff": "diff --git a/pre_commit/languages/rust.py b/pre_commit/languages/rust.py\n--- a/pre_commit/languages/rust.py\n+++ b/pre_commit/languages/rust.py\n@@ -73,7 +73,7 @@\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n \n with clean_path_on_failure(directory):\n- packages_to_install = {()}\n+ packages_to_install = {('--path', '.')}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n", "issue": "rust hook requires `--path` attribute\nCargo has changed how packages get installed and requires an extra `--path <destination>` attribute.\r\n\r\nSymptom:\r\n```\r\n[INFO] Initializing environment for https://github.com/nix-community/nixpkgs-fmt.\r\n[INFO] Installing environment for https://github.com/nix-community/nixpkgs-fmt.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: Command: ('/nix/store/fcc3x8zwq1c0667xjs7bkn6ay8j4fdpz-rust-1.38.0-nightly-2019-08-07-ad7c55e1f/bin/cargo', 'install', '--bins', '--root', '/home/zimbatm/.cache/pre-commit/repoeft6xm6t/rustenv-default')\r\nReturn code: 101\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors: \r\n error: Using `cargo install` to install the binaries for the package in current working directory is no longer supported, use `cargo install --path .` instead. Use `cargo build` if you want to simply build the package.\r\n```\r\n\r\nI guess the fix should be done where here: https://github.com/pre-commit/pre-commit/blob/9c6a1d80d6b94c86a1785a40a51389e83accac3e/pre_commit/languages/rust.py#L87\r\n\r\nDo we want to make pre-commit compatible with multiple versions of cargo or just the latest one?\r\n\r\n/cc @asottile @chriskuehl \n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os.path\n\nimport toml\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'rustenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(target_dir):\n return (\n (\n 'PATH',\n (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),\n ),\n )\n\n\[email protected]\ndef in_env(prefix):\n target_dir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(target_dir)):\n yield\n\n\ndef _add_dependencies(cargo_toml_path, additional_dependencies):\n with open(cargo_toml_path, 'r+') as f:\n cargo_toml = toml.load(f)\n cargo_toml.setdefault('dependencies', {})\n for dep in additional_dependencies:\n name, _, spec = dep.partition(':')\n cargo_toml['dependencies'][name] = spec or '*'\n f.seek(0)\n toml.dump(cargo_toml, f)\n f.truncate()\n\n\ndef install_environment(prefix, version, additional_dependencies):\n helpers.assert_version_default('rust', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # There are two cases where we might want to specify more dependencies:\n # as dependencies for the library being built, and as binary packages\n # to be `cargo install`'d.\n #\n # Unlike e.g. Python, if we just `cargo install` a library, it won't be\n # used for compilation. 
And if we add a crate providing a binary to the\n # `Cargo.toml`, the binary won't be built.\n #\n # Because of this, we allow specifying \"cli\" dependencies by prefixing\n # with 'cli:'.\n cli_deps = {\n dep for dep in additional_dependencies if dep.startswith('cli:')\n }\n lib_deps = set(additional_dependencies) - cli_deps\n\n if len(lib_deps) > 0:\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n\n with clean_path_on_failure(directory):\n packages_to_install = {()}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n if version != '':\n packages_to_install.add((package, '--version', version))\n else:\n packages_to_install.add((package,))\n\n for package in packages_to_install:\n cmd_output(\n 'cargo', 'install', '--bins', '--root', directory, *package,\n cwd=prefix.prefix_dir\n )\n\n\ndef run_hook(hook, file_args):\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/rust.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os.path\n\nimport toml\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'rustenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(target_dir):\n return (\n (\n 'PATH',\n (os.path.join(target_dir, 'bin'), os.pathsep, Var('PATH')),\n ),\n )\n\n\[email protected]\ndef in_env(prefix):\n target_dir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(target_dir)):\n yield\n\n\ndef _add_dependencies(cargo_toml_path, additional_dependencies):\n with open(cargo_toml_path, 'r+') as f:\n cargo_toml = toml.load(f)\n cargo_toml.setdefault('dependencies', {})\n for dep in additional_dependencies:\n name, _, spec = dep.partition(':')\n cargo_toml['dependencies'][name] = spec or '*'\n f.seek(0)\n toml.dump(cargo_toml, f)\n f.truncate()\n\n\ndef install_environment(prefix, version, additional_dependencies):\n helpers.assert_version_default('rust', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # There are two cases where we might want to specify more dependencies:\n # as dependencies for the library being built, and as binary packages\n # to be `cargo install`'d.\n #\n # Unlike e.g. Python, if we just `cargo install` a library, it won't be\n # used for compilation. 
And if we add a crate providing a binary to the\n # `Cargo.toml`, the binary won't be built.\n #\n # Because of this, we allow specifying \"cli\" dependencies by prefixing\n # with 'cli:'.\n cli_deps = {\n dep for dep in additional_dependencies if dep.startswith('cli:')\n }\n lib_deps = set(additional_dependencies) - cli_deps\n\n if len(lib_deps) > 0:\n _add_dependencies(prefix.path('Cargo.toml'), lib_deps)\n\n with clean_path_on_failure(directory):\n packages_to_install = {('--path', '.')}\n for cli_dep in cli_deps:\n cli_dep = cli_dep[len('cli:'):]\n package, _, version = cli_dep.partition(':')\n if version != '':\n packages_to_install.add((package, '--version', version))\n else:\n packages_to_install.add((package,))\n\n for package in packages_to_install:\n cmd_output(\n 'cargo', 'install', '--bins', '--root', directory, *package,\n cwd=prefix.prefix_dir\n )\n\n\ndef run_hook(hook, file_args):\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/rust.py"}]} | 1,489 | 125 |
gh_patches_debug_17363 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-4009 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Jinja2: Handle non-string template names as span resources
Jinja2 template names may not be strings, but need to be serialized as strings prior to being set as a span resource. Problem areas can be found in `ddtrace/contrib/jinja2/patch.py` on lines 63, 81 and 97 where the value from Jinja2 is trusted to be a string, but may not be.
https://github.com/DataDog/dd-trace-py/blob/fb8dfa2f33fff37d21df9728d8386c0260df9744/ddtrace/contrib/jinja2/patch.py#L51-L100
Here is an example of the exception we are seeing when `span.finish` is called:
```
File "/opt/venv/lib/python3.8/site-packages/ddtrace/span.py", line 237, in finish
cb(self)
File "/opt/venv/lib/python3.8/site-packages/ddtrace/tracer.py", line 712, in _on_span_finish
p.on_span_finish(span)
File "/opt/venv/lib/python3.8/site-packages/ddtrace/internal/processor/trace.py", line 208, in on_span_finish
self._writer.write(spans)
File "/opt/venv/lib/python3.8/site-packages/ddtrace/internal/writer.py", line 518, in write
self._encoder.put(spans)
File "ddtrace/internal/_encoding.pyx", line 456, in ddtrace.internal._encoding.MsgpackEncoderBase.put
File "ddtrace/internal/_encoding.pyx", line 460, in ddtrace.internal._encoding.MsgpackEncoderBase.put
File "ddtrace/internal/_encoding.pyx", line 483, in ddtrace.internal._encoding.MsgpackEncoderBase.put
File "ddtrace/internal/_encoding.pyx", line 464, in ddtrace.internal._encoding.MsgpackEncoderBase.put
File "ddtrace/internal/_encoding.pyx", line 451, in ddtrace.internal._encoding.MsgpackEncoderBase._pack_trace
File "ddtrace/internal/_encoding.pyx", line 600, in ddtrace.internal._encoding.MsgpackEncoderV03.pack_span
File "ddtrace/internal/_encoding.pyx", line 142, in ddtrace.internal._encoding.pack_text
TypeError: Unhandled text type: <class 'int'>
```
Relevant pip dependencies:
datadogpy == 0.44.0
dd-trace-py == 1.2.3
jinja2 == 3.0.3
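A minimal sketch of the kind of guard this implies — forcing the template name to a string before it is used as the span resource. The default-name value below is an assumption, and the real patch may use an internal ddtrace helper rather than plain `str()`:

```python
# Sketch only: normalise whatever Jinja2 reports as the template name into a
# str before handing it to the tracer as span.resource / a tag value.
DEFAULT_TEMPLATE_NAME = "<memory>"  # assumed placeholder value; the real
                                    # constant lives in ddtrace.contrib.jinja2.constants


def safe_template_name(raw_name) -> str:
    """Return a str resource name even when Jinja2 hands us an int, None, etc."""
    if raw_name is None:
        raw_name = DEFAULT_TEMPLATE_NAME
    return str(raw_name)


assert safe_template_name(123) == "123"          # the failing case from the trace
assert safe_template_name("index.html") == "index.html"
assert safe_template_name(None) == DEFAULT_TEMPLATE_NAME
```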
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/jinja2/patch.py`
Content:
```
1 import os
2
3 import jinja2
4
5 from ddtrace import config
6 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
7
8 from ...constants import SPAN_MEASURED_KEY
9 from ...ext import SpanTypes
10 from ...internal.utils import ArgumentError
11 from ...internal.utils import get_argument_value
12 from ...pin import Pin
13 from ..trace_utils import unwrap as _u
14 from .constants import DEFAULT_TEMPLATE_NAME
15
16
17 # default settings
18 config._add(
19 "jinja2",
20 {
21 "service_name": os.getenv("DD_JINJA2_SERVICE_NAME"),
22 },
23 )
24
25
26 def patch():
27 if getattr(jinja2, "__datadog_patch", False):
28 # already patched
29 return
30 setattr(jinja2, "__datadog_patch", True)
31 Pin(
32 service=config.jinja2["service_name"],
33 _config=config.jinja2,
34 ).onto(jinja2.environment.Environment)
35 _w(jinja2, "environment.Template.render", _wrap_render)
36 _w(jinja2, "environment.Template.generate", _wrap_render)
37 _w(jinja2, "environment.Environment.compile", _wrap_compile)
38 _w(jinja2, "environment.Environment._load_template", _wrap_load_template)
39
40
41 def unpatch():
42 if not getattr(jinja2, "__datadog_patch", False):
43 return
44 setattr(jinja2, "__datadog_patch", False)
45 _u(jinja2.Template, "render")
46 _u(jinja2.Template, "generate")
47 _u(jinja2.Environment, "compile")
48 _u(jinja2.Environment, "_load_template")
49
50
51 def _wrap_render(wrapped, instance, args, kwargs):
52 """Wrap `Template.render()` or `Template.generate()`"""
53 pin = Pin.get_from(instance.environment)
54 if not pin or not pin.enabled():
55 return wrapped(*args, **kwargs)
56
57 template_name = instance.name or DEFAULT_TEMPLATE_NAME
58 with pin.tracer.trace("jinja2.render", pin.service, span_type=SpanTypes.TEMPLATE) as span:
59 span.set_tag(SPAN_MEASURED_KEY)
60 try:
61 return wrapped(*args, **kwargs)
62 finally:
63 span.resource = template_name
64 span.set_tag("jinja2.template_name", template_name)
65
66
67 def _wrap_compile(wrapped, instance, args, kwargs):
68 pin = Pin.get_from(instance)
69 if not pin or not pin.enabled():
70 return wrapped(*args, **kwargs)
71
72 try:
73 template_name = get_argument_value(args, kwargs, 1, "name")
74 except ArgumentError:
75 template_name = DEFAULT_TEMPLATE_NAME
76
77 with pin.tracer.trace("jinja2.compile", pin.service, span_type=SpanTypes.TEMPLATE) as span:
78 try:
79 return wrapped(*args, **kwargs)
80 finally:
81 span.resource = template_name
82 span.set_tag("jinja2.template_name", template_name)
83
84
85 def _wrap_load_template(wrapped, instance, args, kwargs):
86 pin = Pin.get_from(instance)
87 if not pin or not pin.enabled():
88 return wrapped(*args, **kwargs)
89
90 template_name = get_argument_value(args, kwargs, 0, "name")
91 with pin.tracer.trace("jinja2.load", pin.service, span_type=SpanTypes.TEMPLATE) as span:
92 template = None
93 try:
94 template = wrapped(*args, **kwargs)
95 return template
96 finally:
97 span.resource = template_name
98 span.set_tag("jinja2.template_name", template_name)
99 if template:
100 span.set_tag("jinja2.template_path", template.filename)
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/jinja2/patch.py b/ddtrace/contrib/jinja2/patch.py
--- a/ddtrace/contrib/jinja2/patch.py
+++ b/ddtrace/contrib/jinja2/patch.py
@@ -7,6 +7,7 @@
from ...constants import SPAN_MEASURED_KEY
from ...ext import SpanTypes
+from ...internal.compat import stringify
from ...internal.utils import ArgumentError
from ...internal.utils import get_argument_value
from ...pin import Pin
@@ -54,7 +55,7 @@
if not pin or not pin.enabled():
return wrapped(*args, **kwargs)
- template_name = instance.name or DEFAULT_TEMPLATE_NAME
+ template_name = stringify(instance.name or DEFAULT_TEMPLATE_NAME)
with pin.tracer.trace("jinja2.render", pin.service, span_type=SpanTypes.TEMPLATE) as span:
span.set_tag(SPAN_MEASURED_KEY)
try:
| {"golden_diff": "diff --git a/ddtrace/contrib/jinja2/patch.py b/ddtrace/contrib/jinja2/patch.py\n--- a/ddtrace/contrib/jinja2/patch.py\n+++ b/ddtrace/contrib/jinja2/patch.py\n@@ -7,6 +7,7 @@\n \n from ...constants import SPAN_MEASURED_KEY\n from ...ext import SpanTypes\n+from ...internal.compat import stringify\n from ...internal.utils import ArgumentError\n from ...internal.utils import get_argument_value\n from ...pin import Pin\n@@ -54,7 +55,7 @@\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n \n- template_name = instance.name or DEFAULT_TEMPLATE_NAME\n+ template_name = stringify(instance.name or DEFAULT_TEMPLATE_NAME)\n with pin.tracer.trace(\"jinja2.render\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n try:\n", "issue": "Jinja2: Handle non-string template names as span resources\nJinja2 template names may not be strings, but need to be serialized as strings prior to being set as a span resource. Problem areas can be found in `ddtrace/contrib/jinja2/patch.py` on lines 63, 81 and 97 where the value from Jinja2 is trusted to be a string, but may not be.\r\n\r\nhttps://github.com/DataDog/dd-trace-py/blob/fb8dfa2f33fff37d21df9728d8386c0260df9744/ddtrace/contrib/jinja2/patch.py#L51-L100\r\n\r\n\r\nHere is an example of the exception we are seeing when `span.finish` is called:\r\n\r\n```\r\n File \"/opt/venv/lib/python3.8/site-packages/ddtrace/span.py\", line 237, in finish\r\n cb(self)\r\n File \"/opt/venv/lib/python3.8/site-packages/ddtrace/tracer.py\", line 712, in _on_span_finish\r\n p.on_span_finish(span)\r\n File \"/opt/venv/lib/python3.8/site-packages/ddtrace/internal/processor/trace.py\", line 208, in on_span_finish\r\n self._writer.write(spans)\r\n File \"/opt/venv/lib/python3.8/site-packages/ddtrace/internal/writer.py\", line 518, in write\r\n self._encoder.put(spans)\r\n File \"ddtrace/internal/_encoding.pyx\", line 456, in ddtrace.internal._encoding.MsgpackEncoderBase.put\r\n File \"ddtrace/internal/_encoding.pyx\", line 460, in ddtrace.internal._encoding.MsgpackEncoderBase.put\r\n File \"ddtrace/internal/_encoding.pyx\", line 483, in ddtrace.internal._encoding.MsgpackEncoderBase.put\r\n File \"ddtrace/internal/_encoding.pyx\", line 464, in ddtrace.internal._encoding.MsgpackEncoderBase.put\r\n File \"ddtrace/internal/_encoding.pyx\", line 451, in ddtrace.internal._encoding.MsgpackEncoderBase._pack_trace\r\n File \"ddtrace/internal/_encoding.pyx\", line 600, in ddtrace.internal._encoding.MsgpackEncoderV03.pack_span\r\n File \"ddtrace/internal/_encoding.pyx\", line 142, in ddtrace.internal._encoding.pack_text\r\nTypeError: Unhandled text type: <class 'int'>\r\n```\r\n\r\nRelevant pip dependencies:\r\ndatadogpy == 0.44.0\r\ndd-trace-py == 1.2.3\r\njinja2 == 3.0.3\r\n\n", "before_files": [{"content": "import os\n\nimport jinja2\n\nfrom ddtrace import config\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...internal.utils import ArgumentError\nfrom ...internal.utils import get_argument_value\nfrom ...pin import Pin\nfrom ..trace_utils import unwrap as _u\nfrom .constants import DEFAULT_TEMPLATE_NAME\n\n\n# default settings\nconfig._add(\n \"jinja2\",\n {\n \"service_name\": os.getenv(\"DD_JINJA2_SERVICE_NAME\"),\n },\n)\n\n\ndef patch():\n if getattr(jinja2, \"__datadog_patch\", False):\n # already patched\n return\n setattr(jinja2, \"__datadog_patch\", True)\n Pin(\n service=config.jinja2[\"service_name\"],\n 
_config=config.jinja2,\n ).onto(jinja2.environment.Environment)\n _w(jinja2, \"environment.Template.render\", _wrap_render)\n _w(jinja2, \"environment.Template.generate\", _wrap_render)\n _w(jinja2, \"environment.Environment.compile\", _wrap_compile)\n _w(jinja2, \"environment.Environment._load_template\", _wrap_load_template)\n\n\ndef unpatch():\n if not getattr(jinja2, \"__datadog_patch\", False):\n return\n setattr(jinja2, \"__datadog_patch\", False)\n _u(jinja2.Template, \"render\")\n _u(jinja2.Template, \"generate\")\n _u(jinja2.Environment, \"compile\")\n _u(jinja2.Environment, \"_load_template\")\n\n\ndef _wrap_render(wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\"\"\"\n pin = Pin.get_from(instance.environment)\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n with pin.tracer.trace(\"jinja2.render\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n try:\n return wrapped(*args, **kwargs)\n finally:\n span.resource = template_name\n span.set_tag(\"jinja2.template_name\", template_name)\n\n\ndef _wrap_compile(wrapped, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n\n try:\n template_name = get_argument_value(args, kwargs, 1, \"name\")\n except ArgumentError:\n template_name = DEFAULT_TEMPLATE_NAME\n\n with pin.tracer.trace(\"jinja2.compile\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n try:\n return wrapped(*args, **kwargs)\n finally:\n span.resource = template_name\n span.set_tag(\"jinja2.template_name\", template_name)\n\n\ndef _wrap_load_template(wrapped, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n\n template_name = get_argument_value(args, kwargs, 0, \"name\")\n with pin.tracer.trace(\"jinja2.load\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n span.resource = template_name\n span.set_tag(\"jinja2.template_name\", template_name)\n if template:\n span.set_tag(\"jinja2.template_path\", template.filename)\n", "path": "ddtrace/contrib/jinja2/patch.py"}], "after_files": [{"content": "import os\n\nimport jinja2\n\nfrom ddtrace import config\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...internal.compat import stringify\nfrom ...internal.utils import ArgumentError\nfrom ...internal.utils import get_argument_value\nfrom ...pin import Pin\nfrom ..trace_utils import unwrap as _u\nfrom .constants import DEFAULT_TEMPLATE_NAME\n\n\n# default settings\nconfig._add(\n \"jinja2\",\n {\n \"service_name\": os.getenv(\"DD_JINJA2_SERVICE_NAME\"),\n },\n)\n\n\ndef patch():\n if getattr(jinja2, \"__datadog_patch\", False):\n # already patched\n return\n setattr(jinja2, \"__datadog_patch\", True)\n Pin(\n service=config.jinja2[\"service_name\"],\n _config=config.jinja2,\n ).onto(jinja2.environment.Environment)\n _w(jinja2, \"environment.Template.render\", _wrap_render)\n _w(jinja2, \"environment.Template.generate\", _wrap_render)\n _w(jinja2, \"environment.Environment.compile\", _wrap_compile)\n _w(jinja2, \"environment.Environment._load_template\", _wrap_load_template)\n\n\ndef unpatch():\n if not getattr(jinja2, \"__datadog_patch\", False):\n return\n setattr(jinja2, 
\"__datadog_patch\", False)\n _u(jinja2.Template, \"render\")\n _u(jinja2.Template, \"generate\")\n _u(jinja2.Environment, \"compile\")\n _u(jinja2.Environment, \"_load_template\")\n\n\ndef _wrap_render(wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\"\"\"\n pin = Pin.get_from(instance.environment)\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n\n template_name = stringify(instance.name or DEFAULT_TEMPLATE_NAME)\n with pin.tracer.trace(\"jinja2.render\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n try:\n return wrapped(*args, **kwargs)\n finally:\n span.resource = template_name\n span.set_tag(\"jinja2.template_name\", template_name)\n\n\ndef _wrap_compile(wrapped, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n\n try:\n template_name = get_argument_value(args, kwargs, 1, \"name\")\n except ArgumentError:\n template_name = DEFAULT_TEMPLATE_NAME\n\n with pin.tracer.trace(\"jinja2.compile\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n try:\n return wrapped(*args, **kwargs)\n finally:\n span.resource = template_name\n span.set_tag(\"jinja2.template_name\", template_name)\n\n\ndef _wrap_load_template(wrapped, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return wrapped(*args, **kwargs)\n\n template_name = get_argument_value(args, kwargs, 0, \"name\")\n with pin.tracer.trace(\"jinja2.load\", pin.service, span_type=SpanTypes.TEMPLATE) as span:\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n span.resource = template_name\n span.set_tag(\"jinja2.template_name\", template_name)\n if template:\n span.set_tag(\"jinja2.template_path\", template.filename)\n", "path": "ddtrace/contrib/jinja2/patch.py"}]} | 1,822 | 207 |
gh_patches_debug_3321 | rasdani/github-patches | git_diff | chainer__chainer-7167 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Wrong initialization of the Linear link weights when called with n_batch_axes>1
# **Overview**
When creating the _Linear_ link without specifying the __input_size__ in the constructor and then calling the link with the parameter __n_batch_axes__ > 1, the weight matrix is initialized with a wrong shape and causes an error.
# **Conditions**
Platform: Windows-7-6.1.7601-SP1
Chainer: 5.4.0
NumPy: 1.15.4
CuPy: Not Available
iDeep: Not Available
# Code to reproduce
```
import numpy as np
import chainer
batch_size, seq_len, num_of_features, out_features = (1, 4, 3, 8)
linear_layer_chainer = chainer.links.Linear(out_features)
data = chainer.Variable(np.ones((batch_size, seq_len, num_of_features), dtype=np.float32))
results = linear_layer_chainer (data, n_batch_axes=2)
```
# **Error messages, stack traces, or logs**
```
Traceback (most recent call last):
File "C:\Users\ntt\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3267, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-9-9df2914c2f5a>", line 1, in <module>
results = linear_layer_chainer_2_axes(data, n_batch_axes=2)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\link.py", line 242, in __call__
out = forward(*args, **kwargs)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\links\connection\linear.py", line 138, in forward
return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\functions\connection\linear.py", line 288, in linear
y, = LinearFunction().apply(args)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\function_node.py", line 245, in apply
self._check_data_type_forward(in_data)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\function_node.py", line 330, in _check_data_type_forward
self.check_type_forward(in_type)
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\functions\connection\linear.py", line 27, in check_type_forward
x_type.shape[1] == w_type.shape[1],
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\utils\type_check.py", line 546, in expect
expr.expect()
File "C:\Users\ntt\Anaconda3\lib\site-packages\chainer\utils\type_check.py", line 483, in expect
'{0} {1} {2}'.format(left, self.inv, right))
chainer.utils.type_check.InvalidType:
Invalid operation is performed in: LinearFunction (Forward)
Expect: x.shape[1] == W.shape[1]
Actual: 3 != 12
```
# Others
If you create the _Linear_ layer link with the correct __in_size__ passed explicitly, it works:
```
working_linear = chainer.links.Linear(num_of_features, out_features)
working_results = working_linear(data, n_batch_axes=2)
working_results.shape
```
Output: `(1, 4, 8)`
# Solution
I believe the culprit lies in the _Linear_ link _forward_ method:
```
def forward(self, x, n_batch_axes=1):
"""Applies the linear layer.
Args:
x (~chainer.Variable): Batch of input vectors.
n_batch_axes (int): The number of batch axes. The default is 1. The
input variable is reshaped into
(:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional tensor.
This should be greater than 0.
Returns:
~chainer.Variable: Output of the linear layer.
"""
if self.W.array is None:
in_size = functools.reduce(operator.mul, x.shape[1:], 1)
self._initialize_params(in_size)
return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
```
If I interpret this part correctly, it does not take into account the _n_batch_axes_ parameter when initializing the weight matrix.
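A minimal sketch of what that would mean for the deferred initialization — folding only the non-batch axes into `in_size` (an illustration of the idea, not a finished patch):

```python
# Sketch only: compute the deferred in_size from the non-batch axes so that
# n_batch_axes > 1 also works when in_size was omitted in the constructor.
import functools
import operator


def deferred_in_size(x_shape, n_batch_axes=1):
    """Product of the trailing (non-batch) dimensions of the input shape."""
    return functools.reduce(operator.mul, x_shape[n_batch_axes:], 1)


# With the shapes from the report: (batch_size=1, seq_len=4, num_of_features=3)
assert deferred_in_size((1, 4, 3), n_batch_axes=2) == 3   # W would become (8, 3)
assert deferred_in_size((1, 4, 3), n_batch_axes=1) == 12  # current behaviour -> (8, 12)
```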
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/links/connection/linear.py`
Content:
```
1 import typing as tp # NOQA
2
3 from chainer.functions.connection import linear
4 from chainer import initializers
5 from chainer import link
6 from chainer import types # NOQA
7 from chainer import utils
8 from chainer import variable
9
10
11 class Linear(link.Link):
12
13 """Linear layer (a.k.a.\\ fully-connected layer).
14
15 This is a link that wraps the :func:`~chainer.functions.linear` function,
16 and holds a weight matrix ``W`` and optionally a bias vector ``b`` as
17 parameters.
18
19 If ``initialW`` is left to the default value of ``None``, the weight matrix
20 ``W`` is initialized with i.i.d. Gaussian samples, each of which has zero
21 mean and deviation :math:`\\sqrt{1/\\text{in_size}}`. The bias vector ``b``
22 is of size ``out_size``. If the ``initial_bias`` is to left the default
23 value of ``None``, each element is initialized as zero. If the ``nobias``
24 argument is set to ``True``, then this link does not hold a bias vector.
25
26 Args:
27 in_size (int or None): Dimension of input vectors. If unspecified or
28 ``None``, parameter initialization will be deferred until the
29 first forward data pass at which time the size will be determined.
30 out_size (int): Dimension of output vectors. If only one value is
31 passed for ``in_size`` and ``out_size``, that value will be used
32 for the ``out_size`` dimension.
33 nobias (bool): If ``True``, then this function does not use the bias.
34 initialW (:ref:`initializer <initializer>`): Initializer to initialize
35 the weight. When it is :class:`numpy.ndarray`,
36 its ``ndim`` should be 2. If ``initialW`` is ``None``, then the
37 weights are initialized with i.i.d. Gaussian samples, each of which
38 has zero mean and deviation :math:`\\sqrt{1/\\text{in_size}}`.
39 initial_bias (:ref:`initializer <initializer>`): Initializer to
40 initialize the bias. If ``None``, the bias will be initialized to
41 zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
42 .. seealso:: :func:`~chainer.functions.linear`
43
44 Attributes:
45 W (~chainer.Variable): Weight parameter.
46 b (~chainer.Variable): Bias parameter.
47
48 .. admonition:: Example
49
50 There are several ways to make a Linear link.
51
52 Define an input vector ``x`` as:
53
54 >>> x = np.array([[0, 1, 2, 3, 4]], np.float32)
55
56 1. Give the first two arguments explicitly:
57
58 Those numbers are considered as the input size and the output size.
59
60 >>> l = L.Linear(5, 10)
61 >>> y = l(x)
62 >>> y.shape
63 (1, 10)
64
65 2. Omit ``in_size`` (give the output size only as the first argument)
66 or fill it with ``None``:
67
68 In this case, the size of second axis of ``x`` is used as the
69 input size. So the below two cases are the same.
70
71 >>> l = L.Linear(10)
72 >>> y = l(x)
73 >>> y.shape
74 (1, 10)
75
76 >>> l = L.Linear(None, 10)
77 >>> y = l(x)
78 >>> y.shape
79 (1, 10)
80
81 When you omit the first argument, you need to specify the other
82 subsequent arguments from ``nobias`` as keyword arguments. So the
83 below two cases are the same.
84
85 >>> l = L.Linear(None, 10, False, None, 0)
86 >>> y = l(x)
87 >>> y.shape
88 (1, 10)
89
90 >>> l = L.Linear(10, nobias=False, initialW=None, initial_bias=0)
91 >>> y = l(x)
92 >>> y.shape
93 (1, 10)
94
95 """
96
97 def __init__(self, in_size, out_size=None, nobias=False,
98 initialW=None, initial_bias=None):
99 # type: (tp.Optional[int], tp.Optional[int], bool, tp.Optional[types.InitializerSpec], tp.Optional[types.InitializerSpec]) -> None # NOQA
100
101 super(Linear, self).__init__()
102
103 if out_size is None:
104 in_size, out_size = None, in_size
105 self.in_size = in_size
106 self.out_size = out_size
107
108 with self.init_scope():
109 W_initializer = initializers._get_initializer(initialW)
110 self.W = variable.Parameter(W_initializer) # type: variable.Variable # NOQA
111 if in_size is not None:
112 self._initialize_params(in_size)
113
114 if nobias:
115 self.b = None # type: tp.Optional[variable.Variable]
116 else:
117 if initial_bias is None:
118 initial_bias = 0
119 bias_initializer = initializers._get_initializer(initial_bias)
120 self.b = variable.Parameter(bias_initializer, out_size)
121
122 def _initialize_params(self, in_size):
123 # type: (int) -> None
124
125 self.W.initialize((self.out_size, in_size)) # type: ignore
126
127 @property
128 def printable_specs(self):
129 specs = [
130 ('in_size', self.in_size),
131 ('out_size', self.out_size),
132 ('nobias', self.b is None),
133 ]
134 for spec in specs:
135 yield spec
136
137 def forward(self, x, n_batch_axes=1):
138 # type: (variable.Variable, int) -> variable.Variable
139 """Applies the linear layer.
140
141 Args:
142 x (~chainer.Variable): Batch of input vectors.
143 n_batch_axes (int): The number of batch axes. The default is 1. The
144 input variable is reshaped into
145 (:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional tensor.
146 This should be greater than 0.
147
148 Returns:
149 ~chainer.Variable: Output of the linear layer.
150
151 """
152 if self.W.array is None:
153 in_size = utils.size_of_shape(x.shape[1:])
154 self._initialize_params(in_size)
155 return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/links/connection/linear.py b/chainer/links/connection/linear.py
--- a/chainer/links/connection/linear.py
+++ b/chainer/links/connection/linear.py
@@ -150,6 +150,6 @@
"""
if self.W.array is None:
- in_size = utils.size_of_shape(x.shape[1:])
+ in_size = utils.size_of_shape(x.shape[n_batch_axes:])
self._initialize_params(in_size)
return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
| {"golden_diff": "diff --git a/chainer/links/connection/linear.py b/chainer/links/connection/linear.py\n--- a/chainer/links/connection/linear.py\n+++ b/chainer/links/connection/linear.py\n@@ -150,6 +150,6 @@\n \n \"\"\"\n if self.W.array is None:\n- in_size = utils.size_of_shape(x.shape[1:])\n+ in_size = utils.size_of_shape(x.shape[n_batch_axes:])\n self._initialize_params(in_size)\n return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)\n", "issue": "[BUG] Wrong initialization of the Linear link weights when called with n_batch_axes>1\n# **Overview**\r\nWhen creating the _Linear_ link without specifying the __input_size__ in the constructor and then calling the link with the parameter __n_batch_axes__ > 1, the weight matrix is initialized with a wrong shape and causes an error.\r\n\r\n# **Conditions**\r\nPlatform: Windows-7-6.1.7601-SP1\r\nChainer: 5.4.0\r\nNumPy: 1.15.4\r\nCuPy: Not Available\r\niDeep: Not Available\r\n\r\n# Code to reproduce\r\n```\r\nimport numpy as np\r\nimport chainer\r\nbatch_size, seq_len, num_of_features, out_features = (1, 4, 3, 8)\r\nlinear_layer_chainer = chainer.links.Linear(out_features)\r\ndata = chainer.Variable(np.ones((batch_size, seq_len, num_of_features), dtype=np.float32))\r\nresults = linear_layer_chainer (data, n_batch_axes=2)\r\n```\r\n\r\n\r\n# **Error messages, stack traces, or logs**\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3267, in run_code\r\n exec(code_obj, self.user_global_ns, self.user_ns)\r\n File \"<ipython-input-9-9df2914c2f5a>\", line 1, in <module>\r\n results = linear_layer_chainer_2_axes(data, n_batch_axes=2)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\link.py\", line 242, in __call__\r\n out = forward(*args, **kwargs)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\links\\connection\\linear.py\", line 138, in forward\r\n return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\functions\\connection\\linear.py\", line 288, in linear\r\n y, = LinearFunction().apply(args)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\function_node.py\", line 245, in apply\r\n self._check_data_type_forward(in_data)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\function_node.py\", line 330, in _check_data_type_forward\r\n self.check_type_forward(in_type)\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\functions\\connection\\linear.py\", line 27, in check_type_forward\r\n x_type.shape[1] == w_type.shape[1],\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\utils\\type_check.py\", line 546, in expect\r\n expr.expect()\r\n File \"C:\\Users\\ntt\\Anaconda3\\lib\\site-packages\\chainer\\utils\\type_check.py\", line 483, in expect\r\n '{0} {1} {2}'.format(left, self.inv, right))\r\nchainer.utils.type_check.InvalidType: \r\nInvalid operation is performed in: LinearFunction (Forward)\r\nExpect: x.shape[1] == W.shape[1]\r\nActual: 3 != 12\r\n```\r\n\r\n# Others \r\nIf you create the _Linear_ layer link with __in_size__ passed explicitly to the correct shape, it works:\r\n\r\n```\r\nworking_linear = chainer.links.Linear(num_of_features, out_features)\r\nworking_results = working_linear(data, n_batch_axes=2)\r\nworking_results.shape\r\n```\r\nOutput : `(1, 4, 8)`\r\n\r\n# Solution \r\nI believe the culprit lays in the _Linear_ link _forward_ 
method:\r\n```\r\n def forward(self, x, n_batch_axes=1):\r\n \"\"\"Applies the linear layer.\r\n\r\n Args:\r\n x (~chainer.Variable): Batch of input vectors.\r\n n_batch_axes (int): The number of batch axes. The default is 1. The\r\n input variable is reshaped into\r\n (:math:`{\\\\rm n\\\\_batch\\\\_axes} + 1`)-dimensional tensor.\r\n This should be greater than 0.\r\n\r\n Returns:\r\n ~chainer.Variable: Output of the linear layer.\r\n\r\n \"\"\"\r\n if self.W.array is None:\r\n in_size = functools.reduce(operator.mul, x.shape[1:], 1)\r\n self._initialize_params(in_size)\r\n return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)\r\n```\r\nIf I interpret this part correctly, it does not take into account the _n_batch_axes_ parameter when initializing the weight matrix.\r\n\r\n\n", "before_files": [{"content": "import typing as tp # NOQA\n\nfrom chainer.functions.connection import linear\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer import types # NOQA\nfrom chainer import utils\nfrom chainer import variable\n\n\nclass Linear(link.Link):\n\n \"\"\"Linear layer (a.k.a.\\\\ fully-connected layer).\n\n This is a link that wraps the :func:`~chainer.functions.linear` function,\n and holds a weight matrix ``W`` and optionally a bias vector ``b`` as\n parameters.\n\n If ``initialW`` is left to the default value of ``None``, the weight matrix\n ``W`` is initialized with i.i.d. Gaussian samples, each of which has zero\n mean and deviation :math:`\\\\sqrt{1/\\\\text{in_size}}`. The bias vector ``b``\n is of size ``out_size``. If the ``initial_bias`` is to left the default\n value of ``None``, each element is initialized as zero. If the ``nobias``\n argument is set to ``True``, then this link does not hold a bias vector.\n\n Args:\n in_size (int or None): Dimension of input vectors. If unspecified or\n ``None``, parameter initialization will be deferred until the\n first forward data pass at which time the size will be determined.\n out_size (int): Dimension of output vectors. If only one value is\n passed for ``in_size`` and ``out_size``, that value will be used\n for the ``out_size`` dimension.\n nobias (bool): If ``True``, then this function does not use the bias.\n initialW (:ref:`initializer <initializer>`): Initializer to initialize\n the weight. When it is :class:`numpy.ndarray`,\n its ``ndim`` should be 2. If ``initialW`` is ``None``, then the\n weights are initialized with i.i.d. Gaussian samples, each of which\n has zero mean and deviation :math:`\\\\sqrt{1/\\\\text{in_size}}`.\n initial_bias (:ref:`initializer <initializer>`): Initializer to\n initialize the bias. If ``None``, the bias will be initialized to\n zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.\n .. seealso:: :func:`~chainer.functions.linear`\n\n Attributes:\n W (~chainer.Variable): Weight parameter.\n b (~chainer.Variable): Bias parameter.\n\n .. admonition:: Example\n\n There are several ways to make a Linear link.\n\n Define an input vector ``x`` as:\n\n >>> x = np.array([[0, 1, 2, 3, 4]], np.float32)\n\n 1. Give the first two arguments explicitly:\n\n Those numbers are considered as the input size and the output size.\n\n >>> l = L.Linear(5, 10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n 2. Omit ``in_size`` (give the output size only as the first argument)\n or fill it with ``None``:\n\n In this case, the size of second axis of ``x`` is used as the\n input size. 
So the below two cases are the same.\n\n >>> l = L.Linear(10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n >>> l = L.Linear(None, 10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n When you omit the first argument, you need to specify the other\n subsequent arguments from ``nobias`` as keyword arguments. So the\n below two cases are the same.\n\n >>> l = L.Linear(None, 10, False, None, 0)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n >>> l = L.Linear(10, nobias=False, initialW=None, initial_bias=0)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n \"\"\"\n\n def __init__(self, in_size, out_size=None, nobias=False,\n initialW=None, initial_bias=None):\n # type: (tp.Optional[int], tp.Optional[int], bool, tp.Optional[types.InitializerSpec], tp.Optional[types.InitializerSpec]) -> None # NOQA\n\n super(Linear, self).__init__()\n\n if out_size is None:\n in_size, out_size = None, in_size\n self.in_size = in_size\n self.out_size = out_size\n\n with self.init_scope():\n W_initializer = initializers._get_initializer(initialW)\n self.W = variable.Parameter(W_initializer) # type: variable.Variable # NOQA\n if in_size is not None:\n self._initialize_params(in_size)\n\n if nobias:\n self.b = None # type: tp.Optional[variable.Variable]\n else:\n if initial_bias is None:\n initial_bias = 0\n bias_initializer = initializers._get_initializer(initial_bias)\n self.b = variable.Parameter(bias_initializer, out_size)\n\n def _initialize_params(self, in_size):\n # type: (int) -> None\n\n self.W.initialize((self.out_size, in_size)) # type: ignore\n\n @property\n def printable_specs(self):\n specs = [\n ('in_size', self.in_size),\n ('out_size', self.out_size),\n ('nobias', self.b is None),\n ]\n for spec in specs:\n yield spec\n\n def forward(self, x, n_batch_axes=1):\n # type: (variable.Variable, int) -> variable.Variable\n \"\"\"Applies the linear layer.\n\n Args:\n x (~chainer.Variable): Batch of input vectors.\n n_batch_axes (int): The number of batch axes. The default is 1. The\n input variable is reshaped into\n (:math:`{\\\\rm n\\\\_batch\\\\_axes} + 1`)-dimensional tensor.\n This should be greater than 0.\n\n Returns:\n ~chainer.Variable: Output of the linear layer.\n\n \"\"\"\n if self.W.array is None:\n in_size = utils.size_of_shape(x.shape[1:])\n self._initialize_params(in_size)\n return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)\n", "path": "chainer/links/connection/linear.py"}], "after_files": [{"content": "import typing as tp # NOQA\n\nfrom chainer.functions.connection import linear\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer import types # NOQA\nfrom chainer import utils\nfrom chainer import variable\n\n\nclass Linear(link.Link):\n\n \"\"\"Linear layer (a.k.a.\\\\ fully-connected layer).\n\n This is a link that wraps the :func:`~chainer.functions.linear` function,\n and holds a weight matrix ``W`` and optionally a bias vector ``b`` as\n parameters.\n\n If ``initialW`` is left to the default value of ``None``, the weight matrix\n ``W`` is initialized with i.i.d. Gaussian samples, each of which has zero\n mean and deviation :math:`\\\\sqrt{1/\\\\text{in_size}}`. The bias vector ``b``\n is of size ``out_size``. If the ``initial_bias`` is to left the default\n value of ``None``, each element is initialized as zero. If the ``nobias``\n argument is set to ``True``, then this link does not hold a bias vector.\n\n Args:\n in_size (int or None): Dimension of input vectors. 
If unspecified or\n ``None``, parameter initialization will be deferred until the\n first forward data pass at which time the size will be determined.\n out_size (int): Dimension of output vectors. If only one value is\n passed for ``in_size`` and ``out_size``, that value will be used\n for the ``out_size`` dimension.\n nobias (bool): If ``True``, then this function does not use the bias.\n initialW (:ref:`initializer <initializer>`): Initializer to initialize\n the weight. When it is :class:`numpy.ndarray`,\n its ``ndim`` should be 2. If ``initialW`` is ``None``, then the\n weights are initialized with i.i.d. Gaussian samples, each of which\n has zero mean and deviation :math:`\\\\sqrt{1/\\\\text{in_size}}`.\n initial_bias (:ref:`initializer <initializer>`): Initializer to\n initialize the bias. If ``None``, the bias will be initialized to\n zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.\n .. seealso:: :func:`~chainer.functions.linear`\n\n Attributes:\n W (~chainer.Variable): Weight parameter.\n b (~chainer.Variable): Bias parameter.\n\n .. admonition:: Example\n\n There are several ways to make a Linear link.\n\n Define an input vector ``x`` as:\n\n >>> x = np.array([[0, 1, 2, 3, 4]], np.float32)\n\n 1. Give the first two arguments explicitly:\n\n Those numbers are considered as the input size and the output size.\n\n >>> l = L.Linear(5, 10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n 2. Omit ``in_size`` (give the output size only as the first argument)\n or fill it with ``None``:\n\n In this case, the size of second axis of ``x`` is used as the\n input size. So the below two cases are the same.\n\n >>> l = L.Linear(10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n >>> l = L.Linear(None, 10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n When you omit the first argument, you need to specify the other\n subsequent arguments from ``nobias`` as keyword arguments. 
So the\n below two cases are the same.\n\n >>> l = L.Linear(None, 10, False, None, 0)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n >>> l = L.Linear(10, nobias=False, initialW=None, initial_bias=0)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n \"\"\"\n\n def __init__(self, in_size, out_size=None, nobias=False,\n initialW=None, initial_bias=None):\n # type: (tp.Optional[int], tp.Optional[int], bool, tp.Optional[types.InitializerSpec], tp.Optional[types.InitializerSpec]) -> None # NOQA\n\n super(Linear, self).__init__()\n\n if out_size is None:\n in_size, out_size = None, in_size\n self.in_size = in_size\n self.out_size = out_size\n\n with self.init_scope():\n W_initializer = initializers._get_initializer(initialW)\n self.W = variable.Parameter(W_initializer) # type: variable.Variable # NOQA\n if in_size is not None:\n self._initialize_params(in_size)\n\n if nobias:\n self.b = None # type: tp.Optional[variable.Variable]\n else:\n if initial_bias is None:\n initial_bias = 0\n bias_initializer = initializers._get_initializer(initial_bias)\n self.b = variable.Parameter(bias_initializer, out_size)\n\n def _initialize_params(self, in_size):\n # type: (int) -> None\n\n self.W.initialize((self.out_size, in_size)) # type: ignore\n\n @property\n def printable_specs(self):\n specs = [\n ('in_size', self.in_size),\n ('out_size', self.out_size),\n ('nobias', self.b is None),\n ]\n for spec in specs:\n yield spec\n\n def forward(self, x, n_batch_axes=1):\n # type: (variable.Variable, int) -> variable.Variable\n \"\"\"Applies the linear layer.\n\n Args:\n x (~chainer.Variable): Batch of input vectors.\n n_batch_axes (int): The number of batch axes. The default is 1. The\n input variable is reshaped into\n (:math:`{\\\\rm n\\\\_batch\\\\_axes} + 1`)-dimensional tensor.\n This should be greater than 0.\n\n Returns:\n ~chainer.Variable: Output of the linear layer.\n\n \"\"\"\n if self.W.array is None:\n in_size = utils.size_of_shape(x.shape[n_batch_axes:])\n self._initialize_params(in_size)\n return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)\n", "path": "chainer/links/connection/linear.py"}]} | 3,101 | 125 |
gh_patches_debug_22154 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New connection creation flow does not handle schema creation failure scenarios
## Description
1. Create a database manually, say `test_db`, and create a schema called `Library Management` in it.
1. In Mathesar, create a new connection to the database.
1. While creating the connection, in the "New PostgreSQL Database Connection" modal, check the "Library Management" schema in the "Schemas to install" field.

1. Click on 'Add Connection'.
1. An error is thrown:
- `"(psycopg2.errors.DuplicateSchema) schema \"Library Management\" already exists\n\n[SQL: CREATE SCHEMA \"Library Management\";]\n(Background on this error at: https://sqlalche.me/e/14/f405)"`.
- This is expected.
1. The modal remains open. Uncheck the "Library Management" schema, and try clicking on 'Add Connection' again.
1. Another error is thrown:
- `duplicate key value violates unique constraint "mathesar_database_name_key" DETAIL: Key (name)=(pavish_install_test_1) already exists.`
   - From the user perspective, there's no clear next step for them to take when this happens.
1. Close the modal and refresh the page, notice that the connection has been created already.
1. Open the connection, and notice that it shows `Schemas (0)`.
1. Forcing a manual reflection using the "Sync external changes" button shows the public schema and existing tables.
### Note:
* I performed the above steps 1-3 only as a means to force an error in the schema creation flow. While testing, I encountered an error that occurred for an unknown reason.
* This issue primarily focuses on steps 4-10.
## Expected behavior
* When an error occurs, there should not be an inconsistent middle state in the DB vs the UI.
* When schema creation fails, we should try to revert back to the original state, or we should make it extra clear on the UI that it's a multi-step process.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/utils/connections.py`
Content:
```
1 """Utilities to help with creating and managing connections in Mathesar."""
2 from psycopg2.errors import DuplicateSchema
3 from sqlalchemy.exc import OperationalError
4 from mathesar.models.base import Database
5 from db import install, connection as dbconn
6 from mathesar.state import reset_reflection
7 from demo.install.library_dataset import load_library_dataset
8 from demo.install.movies_dataset import load_movies_dataset
9
10
11 class BadInstallationTarget(Exception):
12 """Raise when an attempt is made to install on a disallowed target"""
13 pass
14
15
16 def copy_connection_from_preexisting(
17 connection, nickname, db_name, create_db, sample_data
18 ):
19 if connection['connection_type'] == 'internal_database':
20 db_model = Database.create_from_settings_key('default')
21 elif connection['connection_type'] == 'user_database':
22 db_model = Database.current_objects.get(id=connection['id'])
23 db_model.id = None
24 else:
25 raise KeyError("connection_type")
26 root_db = db_model.db_name
27 return _save_and_install(
28 db_model, db_name, root_db, nickname, create_db, sample_data
29 )
30
31
32 def create_connection_from_scratch(
33 user, password, host, port, nickname, db_name, sample_data
34 ):
35 db_model = Database(username=user, password=password, host=host, port=port)
36 root_db = db_name
37 return _save_and_install(
38 db_model, db_name, root_db, nickname, False, sample_data
39 )
40
41
42 def create_connection_with_new_user(
43 connection, user, password, nickname, db_name, create_db, sample_data
44 ):
45 db_model = copy_connection_from_preexisting(
46 connection, nickname, db_name, create_db, []
47 )
48 engine = db_model._sa_engine
49 db_model.username = user
50 db_model.password = password
51 db_model.save()
52 dbconn.execute_msar_func_with_engine(
53 engine,
54 'create_basic_mathesar_user',
55 db_model.username,
56 db_model.password
57 )
58 _load_sample_data(db_model._sa_engine, sample_data)
59 return db_model
60
61
62 def _save_and_install(
63 db_model, db_name, root_db, nickname, create_db, sample_data
64 ):
65 db_model.name = nickname
66 db_model.db_name = db_name
67 _validate_db_model(db_model)
68 db_model.save()
69 try:
70 install.install_mathesar(
71 database_name=db_model.db_name,
72 username=db_model.username,
73 password=db_model.password,
74 hostname=db_model.host,
75 port=db_model.port,
76 skip_confirm=True,
77 create_db=create_db,
78 root_db=root_db,
79 )
80 except OperationalError as e:
81 db_model.delete()
82 raise e
83 _load_sample_data(db_model._sa_engine, sample_data)
84 return db_model
85
86
87 def _load_sample_data(engine, sample_data):
88 DATASET_MAP = {
89 'library_management': load_library_dataset,
90 'movie_collection': load_movies_dataset,
91 }
92 for key in sample_data:
93 try:
94 DATASET_MAP[key](engine, safe_mode=True)
95 except DuplicateSchema:
96 # We swallow this error, since otherwise we'll raise an error on the
97 # front end even though installation generally succeeded.
98 continue
99 reset_reflection()
100
101
102 def _validate_db_model(db_model):
103 internal_db_model = Database.create_from_settings_key('default')
104 if (
105 internal_db_model is not None
106 and db_model.host == internal_db_model.host
107 and db_model.port == internal_db_model.port
108 and db_model.db_name == internal_db_model.db_name
109 ):
110 raise BadInstallationTarget(
111 "Mathesar can't be installed in the internal DB namespace"
112 )
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/utils/connections.py b/mathesar/utils/connections.py
--- a/mathesar/utils/connections.py
+++ b/mathesar/utils/connections.py
@@ -1,6 +1,6 @@
"""Utilities to help with creating and managing connections in Mathesar."""
from psycopg2.errors import DuplicateSchema
-from sqlalchemy.exc import OperationalError
+from sqlalchemy.exc import OperationalError, ProgrammingError
from mathesar.models.base import Database
from db import install, connection as dbconn
from mathesar.state import reset_reflection
@@ -92,10 +92,11 @@
for key in sample_data:
try:
DATASET_MAP[key](engine, safe_mode=True)
- except DuplicateSchema:
- # We swallow this error, since otherwise we'll raise an error on the
- # front end even though installation generally succeeded.
- continue
+ except ProgrammingError as e:
+ if isinstance(e.orig, DuplicateSchema):
+ # We swallow this error, since otherwise we'll raise an error on the
+ # front end even though installation generally succeeded.
+ continue
reset_reflection()
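The exception swap above matters because SQLAlchemy wraps driver-level failures: a failed `CREATE SCHEMA` reaches application code as `sqlalchemy.exc.ProgrammingError` with the original `psycopg2.errors.DuplicateSchema` attached as its `orig` attribute, so a bare `except DuplicateSchema:` clause never matched. A minimal sketch of the same catch-and-inspect pattern, assuming psycopg2 and SQLAlchemy are available; the helper name is illustrative and not part of the Mathesar codebase:

```python
from psycopg2.errors import DuplicateSchema
from sqlalchemy.exc import ProgrammingError


def run_ignoring_duplicate_schema(loader, *args, **kwargs):
    """Call a sample-data loader, swallowing only 'schema already exists'."""
    try:
        return loader(*args, **kwargs)
    except ProgrammingError as e:
        # SQLAlchemy wraps the driver error; the psycopg2 exception sits on .orig.
        if isinstance(e.orig, DuplicateSchema):
            return None  # treat an already-installed schema as success
        raise  # anything else is a real failure and should surface
```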
| {"golden_diff": "diff --git a/mathesar/utils/connections.py b/mathesar/utils/connections.py\n--- a/mathesar/utils/connections.py\n+++ b/mathesar/utils/connections.py\n@@ -1,6 +1,6 @@\n \"\"\"Utilities to help with creating and managing connections in Mathesar.\"\"\"\n from psycopg2.errors import DuplicateSchema\n-from sqlalchemy.exc import OperationalError\n+from sqlalchemy.exc import OperationalError, ProgrammingError\n from mathesar.models.base import Database\n from db import install, connection as dbconn\n from mathesar.state import reset_reflection\n@@ -92,10 +92,11 @@\n for key in sample_data:\n try:\n DATASET_MAP[key](engine, safe_mode=True)\n- except DuplicateSchema:\n- # We swallow this error, since otherwise we'll raise an error on the\n- # front end even though installation generally succeeded.\n- continue\n+ except ProgrammingError as e:\n+ if isinstance(e.orig, DuplicateSchema):\n+ # We swallow this error, since otherwise we'll raise an error on the\n+ # front end even though installation generally succeeded.\n+ continue\n reset_reflection()\n", "issue": "New connection creation flow does not handle schema creation failure scenarios\n## Description\r\n1. Create a database manually, say `test_db`, and create a schema called `Library Management` in it.\r\n1. In Mathesar, create a new connection to the database.\r\n1. While creating the connection, in the \"New PostgreSQL Database Connection\" modal, check the \"Library Management\" schema in the \"Schemas to install\" field.\r\n \r\n1. Click on 'Add Connection'.\r\n1. An error is thrown:\r\n - `\"(psycopg2.errors.DuplicateSchema) schema \\\"Library Management\\\" already exists\\n\\n[SQL: CREATE SCHEMA \\\"Library Management\\\";]\\n(Background on this error at: https://sqlalche.me/e/14/f405)\"`.\r\n - This is expected.\r\n1. The modal remains open. Uncheck the \"Library Management\" schema, and try clicking on 'Add Connection' again.\r\n1. Another error is thrown:\r\n - `duplicate key value violates unique constraint \"mathesar_database_name_key\" DETAIL: Key (name)=(pavish_install_test_1) already exists.`\r\n - From the user perpective, there's no clear next step for them to take when this happens.\r\n1. Close the modal and refresh the page, notice that the connection has been created already.\r\n1. Open the connection, and notice that it shows `Schemas (0)`.\r\n1. Forcing a manual reflection using the \"Sync external changes\" button shows the public schema and existing tables.\r\n\r\n### Note:\r\n* I performed the above steps 1-3 only as a means to force an error in the schema creation flow. While testing, I encountered an error which occurred due to an unknown reason. 
\r\n* This issue primarily focuses on steps 4-10.\r\n\r\n## Expected behavior\r\n* When an error occurs, there should not be an inconsistent middle state in the DB vs the UI.\r\n* When schema creation fails, we should try to revert back to the original state, or we should make it extra clear on the UI that it's a multi-step process.\n", "before_files": [{"content": "\"\"\"Utilities to help with creating and managing connections in Mathesar.\"\"\"\nfrom psycopg2.errors import DuplicateSchema\nfrom sqlalchemy.exc import OperationalError\nfrom mathesar.models.base import Database\nfrom db import install, connection as dbconn\nfrom mathesar.state import reset_reflection\nfrom demo.install.library_dataset import load_library_dataset\nfrom demo.install.movies_dataset import load_movies_dataset\n\n\nclass BadInstallationTarget(Exception):\n \"\"\"Raise when an attempt is made to install on a disallowed target\"\"\"\n pass\n\n\ndef copy_connection_from_preexisting(\n connection, nickname, db_name, create_db, sample_data\n):\n if connection['connection_type'] == 'internal_database':\n db_model = Database.create_from_settings_key('default')\n elif connection['connection_type'] == 'user_database':\n db_model = Database.current_objects.get(id=connection['id'])\n db_model.id = None\n else:\n raise KeyError(\"connection_type\")\n root_db = db_model.db_name\n return _save_and_install(\n db_model, db_name, root_db, nickname, create_db, sample_data\n )\n\n\ndef create_connection_from_scratch(\n user, password, host, port, nickname, db_name, sample_data\n):\n db_model = Database(username=user, password=password, host=host, port=port)\n root_db = db_name\n return _save_and_install(\n db_model, db_name, root_db, nickname, False, sample_data\n )\n\n\ndef create_connection_with_new_user(\n connection, user, password, nickname, db_name, create_db, sample_data\n):\n db_model = copy_connection_from_preexisting(\n connection, nickname, db_name, create_db, []\n )\n engine = db_model._sa_engine\n db_model.username = user\n db_model.password = password\n db_model.save()\n dbconn.execute_msar_func_with_engine(\n engine,\n 'create_basic_mathesar_user',\n db_model.username,\n db_model.password\n )\n _load_sample_data(db_model._sa_engine, sample_data)\n return db_model\n\n\ndef _save_and_install(\n db_model, db_name, root_db, nickname, create_db, sample_data\n):\n db_model.name = nickname\n db_model.db_name = db_name\n _validate_db_model(db_model)\n db_model.save()\n try:\n install.install_mathesar(\n database_name=db_model.db_name,\n username=db_model.username,\n password=db_model.password,\n hostname=db_model.host,\n port=db_model.port,\n skip_confirm=True,\n create_db=create_db,\n root_db=root_db,\n )\n except OperationalError as e:\n db_model.delete()\n raise e\n _load_sample_data(db_model._sa_engine, sample_data)\n return db_model\n\n\ndef _load_sample_data(engine, sample_data):\n DATASET_MAP = {\n 'library_management': load_library_dataset,\n 'movie_collection': load_movies_dataset,\n }\n for key in sample_data:\n try:\n DATASET_MAP[key](engine, safe_mode=True)\n except DuplicateSchema:\n # We swallow this error, since otherwise we'll raise an error on the\n # front end even though installation generally succeeded.\n continue\n reset_reflection()\n\n\ndef _validate_db_model(db_model):\n internal_db_model = Database.create_from_settings_key('default')\n if (\n internal_db_model is not None\n and db_model.host == internal_db_model.host\n and db_model.port == internal_db_model.port\n and db_model.db_name == 
internal_db_model.db_name\n ):\n raise BadInstallationTarget(\n \"Mathesar can't be installed in the internal DB namespace\"\n )\n", "path": "mathesar/utils/connections.py"}], "after_files": [{"content": "\"\"\"Utilities to help with creating and managing connections in Mathesar.\"\"\"\nfrom psycopg2.errors import DuplicateSchema\nfrom sqlalchemy.exc import OperationalError, ProgrammingError\nfrom mathesar.models.base import Database\nfrom db import install, connection as dbconn\nfrom mathesar.state import reset_reflection\nfrom demo.install.library_dataset import load_library_dataset\nfrom demo.install.movies_dataset import load_movies_dataset\n\n\nclass BadInstallationTarget(Exception):\n \"\"\"Raise when an attempt is made to install on a disallowed target\"\"\"\n pass\n\n\ndef copy_connection_from_preexisting(\n connection, nickname, db_name, create_db, sample_data\n):\n if connection['connection_type'] == 'internal_database':\n db_model = Database.create_from_settings_key('default')\n elif connection['connection_type'] == 'user_database':\n db_model = Database.current_objects.get(id=connection['id'])\n db_model.id = None\n else:\n raise KeyError(\"connection_type\")\n root_db = db_model.db_name\n return _save_and_install(\n db_model, db_name, root_db, nickname, create_db, sample_data\n )\n\n\ndef create_connection_from_scratch(\n user, password, host, port, nickname, db_name, sample_data\n):\n db_model = Database(username=user, password=password, host=host, port=port)\n root_db = db_name\n return _save_and_install(\n db_model, db_name, root_db, nickname, False, sample_data\n )\n\n\ndef create_connection_with_new_user(\n connection, user, password, nickname, db_name, create_db, sample_data\n):\n db_model = copy_connection_from_preexisting(\n connection, nickname, db_name, create_db, []\n )\n engine = db_model._sa_engine\n db_model.username = user\n db_model.password = password\n db_model.save()\n dbconn.execute_msar_func_with_engine(\n engine,\n 'create_basic_mathesar_user',\n db_model.username,\n db_model.password\n )\n _load_sample_data(db_model._sa_engine, sample_data)\n return db_model\n\n\ndef _save_and_install(\n db_model, db_name, root_db, nickname, create_db, sample_data\n):\n db_model.name = nickname\n db_model.db_name = db_name\n _validate_db_model(db_model)\n db_model.save()\n try:\n install.install_mathesar(\n database_name=db_model.db_name,\n username=db_model.username,\n password=db_model.password,\n hostname=db_model.host,\n port=db_model.port,\n skip_confirm=True,\n create_db=create_db,\n root_db=root_db,\n )\n except OperationalError as e:\n db_model.delete()\n raise e\n _load_sample_data(db_model._sa_engine, sample_data)\n return db_model\n\n\ndef _load_sample_data(engine, sample_data):\n DATASET_MAP = {\n 'library_management': load_library_dataset,\n 'movie_collection': load_movies_dataset,\n }\n for key in sample_data:\n try:\n DATASET_MAP[key](engine, safe_mode=True)\n except ProgrammingError as e:\n if isinstance(e.orig, DuplicateSchema):\n # We swallow this error, since otherwise we'll raise an error on the\n # front end even though installation generally succeeded.\n continue\n reset_reflection()\n\n\ndef _validate_db_model(db_model):\n internal_db_model = Database.create_from_settings_key('default')\n if (\n internal_db_model is not None\n and db_model.host == internal_db_model.host\n and db_model.port == internal_db_model.port\n and db_model.db_name == internal_db_model.db_name\n ):\n raise BadInstallationTarget(\n \"Mathesar can't be installed in the internal 
DB namespace\"\n )\n", "path": "mathesar/utils/connections.py"}]} | 1,788 | 243 |
gh_patches_debug_43089 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1898 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Socket in use error when sending epp messages
### Current Behavior
Occasionally, we are unable to send messages via epp because of the following error:
"InfoDomain failed to execute due to an unknown error. Error: This socket is already used by another greenlet: <bound method Waiter.switch of <gevent._gevent_c_waiter.Waiter object at 0x7f8e1e5f6750>>"
### Expected Behavior
No error should occur.
### Steps to Reproduce
Reproduction steps unknown; this happens irregularly and we haven't narrowed down a specific set of easily followed steps that can always guarantee this occurs. See additional context.
### Environment
_No response_
### Additional Context
This bug started after we added gunicorn using gevent instead of just the default sync option. While we should still be asynchronous, this creates an issue with how we are using our sockets on epp calls. We were able to see this more with the removal of the connection pool.
### Issue Links
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/epplibwrapper/client.py`
Content:
```
1 """Provide a wrapper around epplib to handle authentication and errors."""
2
3 import logging
4
5 try:
6 from epplib.client import Client
7 from epplib import commands
8 from epplib.exceptions import TransportError, ParsingError
9 from epplib.transport import SocketTransport
10 except ImportError:
11 pass
12
13 from django.conf import settings
14
15 from .cert import Cert, Key
16 from .errors import ErrorCode, LoginError, RegistryError
17
18 logger = logging.getLogger(__name__)
19
20 try:
21 # Write cert and key to disk
22 CERT = Cert()
23 KEY = Key()
24 except Exception:
25 CERT = None # type: ignore
26 KEY = None # type: ignore
27 logger.warning(
28 "Problem with client certificate. Registrar cannot contact registry.",
29 exc_info=True,
30 )
31
32
33 class EPPLibWrapper:
34 """
35 A wrapper over epplib's client.
36
37 ATTN: This should not be used directly. Use `Domain` from domain.py.
38 """
39
40 def __init__(self) -> None:
41 """Initialize settings which will be used for all connections."""
42 # set _client to None initially. In the event that the __init__ fails
43 # before _client initializes, app should still start and be in a state
44 # that it can attempt _client initialization on send attempts
45 self._client = None # type: ignore
46 # prepare (but do not send) a Login command
47 self._login = commands.Login(
48 cl_id=settings.SECRET_REGISTRY_CL_ID,
49 password=settings.SECRET_REGISTRY_PASSWORD,
50 obj_uris=[
51 "urn:ietf:params:xml:ns:domain-1.0",
52 "urn:ietf:params:xml:ns:contact-1.0",
53 ],
54 )
55 try:
56 self._initialize_client()
57 except Exception:
58 logger.warning("Unable to configure epplib. Registrar cannot contact registry.")
59
60 def _initialize_client(self) -> None:
61 """Initialize a client, assuming _login defined. Sets _client to initialized
62 client. Raises errors if initialization fails.
63 This method will be called at app initialization, and also during retries."""
64 # establish a client object with a TCP socket transport
65 # note that type: ignore added in several places because linter complains
66 # about _client initially being set to None, and None type doesn't match code
67 self._client = Client( # type: ignore
68 SocketTransport(
69 settings.SECRET_REGISTRY_HOSTNAME,
70 cert_file=CERT.filename,
71 key_file=KEY.filename,
72 password=settings.SECRET_REGISTRY_KEY_PASSPHRASE,
73 )
74 )
75 try:
76 # use the _client object to connect
77 self._client.connect() # type: ignore
78 response = self._client.send(self._login) # type: ignore
79 if response.code >= 2000: # type: ignore
80 self._client.close() # type: ignore
81 raise LoginError(response.msg) # type: ignore
82 except TransportError as err:
83 message = "_initialize_client failed to execute due to a connection error."
84 logger.error(f"{message} Error: {err}")
85 raise RegistryError(message, code=ErrorCode.TRANSPORT_ERROR) from err
86 except LoginError as err:
87 raise err
88 except Exception as err:
89 message = "_initialize_client failed to execute due to an unknown error."
90 logger.error(f"{message} Error: {err}")
91 raise RegistryError(message) from err
92
93 def _disconnect(self) -> None:
94 """Close the connection."""
95 try:
96 self._client.send(commands.Logout()) # type: ignore
97 self._client.close() # type: ignore
98 except Exception:
99 logger.warning("Connection to registry was not cleanly closed.")
100
101 def _send(self, command):
102 """Helper function used by `send`."""
103 cmd_type = command.__class__.__name__
104
105 try:
106 # check for the condition that the _client was not initialized properly
107 # at app initialization
108 if self._client is None:
109 self._initialize_client()
110 response = self._client.send(command)
111 except (ValueError, ParsingError) as err:
112 message = f"{cmd_type} failed to execute due to some syntax error."
113 logger.error(f"{message} Error: {err}")
114 raise RegistryError(message) from err
115 except TransportError as err:
116 message = f"{cmd_type} failed to execute due to a connection error."
117 logger.error(f"{message} Error: {err}")
118 raise RegistryError(message, code=ErrorCode.TRANSPORT_ERROR) from err
119 except LoginError as err:
120 # For linter due to it not liking this line length
121 text = "failed to execute due to a registry login error."
122 message = f"{cmd_type} {text}"
123 logger.error(f"{message} Error: {err}")
124 raise RegistryError(message) from err
125 except Exception as err:
126 message = f"{cmd_type} failed to execute due to an unknown error."
127 logger.error(f"{message} Error: {err}")
128 raise RegistryError(message) from err
129 else:
130 if response.code >= 2000:
131 raise RegistryError(response.msg, code=response.code)
132 else:
133 return response
134
135 def _retry(self, command):
136 """Retry sending a command through EPP by re-initializing the client
137 and then sending the command."""
138 # re-initialize by disconnecting and initial
139 self._disconnect()
140 self._initialize_client()
141 return self._send(command)
142
143 def send(self, command, *, cleaned=False):
144 """Login, the send the command. Retry once if an error is found"""
145 # try to prevent use of this method without appropriate safeguards
146 cmd_type = command.__class__.__name__
147 if not cleaned:
148 raise ValueError("Please sanitize user input before sending it.")
149 try:
150 return self._send(command)
151 except RegistryError as err:
152 if (
153 err.is_transport_error()
154 or err.is_connection_error()
155 or err.is_session_error()
156 or err.is_server_error()
157 or err.should_retry()
158 ):
159 message = f"{cmd_type} failed and will be retried"
160 logger.info(f"{message} Error: {err}")
161 return self._retry(command)
162 else:
163 raise err
164
165
166 try:
167 # Initialize epplib
168 CLIENT = EPPLibWrapper()
169 logger.info("registry client initialized")
170 except Exception:
171 logger.warning("Unable to configure epplib. Registrar cannot contact registry.")
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/epplibwrapper/client.py b/src/epplibwrapper/client.py
--- a/src/epplibwrapper/client.py
+++ b/src/epplibwrapper/client.py
@@ -1,6 +1,7 @@
"""Provide a wrapper around epplib to handle authentication and errors."""
import logging
+from gevent.lock import BoundedSemaphore
try:
from epplib.client import Client
@@ -52,10 +53,16 @@
"urn:ietf:params:xml:ns:contact-1.0",
],
)
+ # We should only ever have one active connection at a time
+ self.connection_lock = BoundedSemaphore(1)
+
+ self.connection_lock.acquire()
try:
self._initialize_client()
except Exception:
- logger.warning("Unable to configure epplib. Registrar cannot contact registry.")
+ logger.warning("Unable to configure the connection to the registry.")
+ finally:
+ self.connection_lock.release()
def _initialize_client(self) -> None:
"""Initialize a client, assuming _login defined. Sets _client to initialized
@@ -74,11 +81,7 @@
)
try:
# use the _client object to connect
- self._client.connect() # type: ignore
- response = self._client.send(self._login) # type: ignore
- if response.code >= 2000: # type: ignore
- self._client.close() # type: ignore
- raise LoginError(response.msg) # type: ignore
+ self._connect()
except TransportError as err:
message = "_initialize_client failed to execute due to a connection error."
logger.error(f"{message} Error: {err}")
@@ -90,13 +93,33 @@
logger.error(f"{message} Error: {err}")
raise RegistryError(message) from err
+ def _connect(self) -> None:
+ """Connects to EPP. Sends a login command. If an invalid response is returned,
+ the client will be closed and a LoginError raised."""
+ self._client.connect() # type: ignore
+ response = self._client.send(self._login) # type: ignore
+ if response.code >= 2000: # type: ignore
+ self._client.close() # type: ignore
+ raise LoginError(response.msg) # type: ignore
+
def _disconnect(self) -> None:
- """Close the connection."""
+ """Close the connection. Sends a logout command and closes the connection."""
+ self._send_logout_command()
+ self._close_client()
+
+ def _send_logout_command(self):
+ """Sends a logout command to epp"""
try:
self._client.send(commands.Logout()) # type: ignore
- self._client.close() # type: ignore
- except Exception:
- logger.warning("Connection to registry was not cleanly closed.")
+ except Exception as err:
+ logger.warning(f"Logout command not sent successfully: {err}")
+
+ def _close_client(self):
+ """Closes an active client connection"""
+ try:
+ self._client.close()
+ except Exception as err:
+ logger.warning(f"Connection to registry was not cleanly closed: {err}")
def _send(self, command):
"""Helper function used by `send`."""
@@ -146,6 +169,8 @@
cmd_type = command.__class__.__name__
if not cleaned:
raise ValueError("Please sanitize user input before sending it.")
+
+ self.connection_lock.acquire()
try:
return self._send(command)
except RegistryError as err:
@@ -161,6 +186,8 @@
return self._retry(command)
else:
raise err
+ finally:
+ self.connection_lock.release()
try:
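The heart of the patch is the `BoundedSemaphore(1)` held for the duration of `send()`: with gunicorn's gevent worker class, several greenlets can call the wrapper concurrently, and the single epplib socket transport cannot be shared, which is what raises the "socket is already used by another greenlet" error. A stripped-down sketch of the locking pattern, assuming gevent is installed; the class and its methods are illustrative, not the actual wrapper:

```python
from gevent.lock import BoundedSemaphore


class SerializedTransport:
    """Let only one greenlet at a time use a shared, non-greenlet-safe transport."""

    def __init__(self, transport):
        self._transport = transport
        self._lock = BoundedSemaphore(1)  # at most one concurrent holder

    def send(self, command):
        self._lock.acquire()
        try:
            return self._transport.send(command)
        finally:
            # Release even when send() raises so waiting greenlets don't deadlock.
            self._lock.release()
```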
| {"golden_diff": "diff --git a/src/epplibwrapper/client.py b/src/epplibwrapper/client.py\n--- a/src/epplibwrapper/client.py\n+++ b/src/epplibwrapper/client.py\n@@ -1,6 +1,7 @@\n \"\"\"Provide a wrapper around epplib to handle authentication and errors.\"\"\"\n \n import logging\n+from gevent.lock import BoundedSemaphore\n \n try:\n from epplib.client import Client\n@@ -52,10 +53,16 @@\n \"urn:ietf:params:xml:ns:contact-1.0\",\n ],\n )\n+ # We should only ever have one active connection at a time\n+ self.connection_lock = BoundedSemaphore(1)\n+\n+ self.connection_lock.acquire()\n try:\n self._initialize_client()\n except Exception:\n- logger.warning(\"Unable to configure epplib. Registrar cannot contact registry.\")\n+ logger.warning(\"Unable to configure the connection to the registry.\")\n+ finally:\n+ self.connection_lock.release()\n \n def _initialize_client(self) -> None:\n \"\"\"Initialize a client, assuming _login defined. Sets _client to initialized\n@@ -74,11 +81,7 @@\n )\n try:\n # use the _client object to connect\n- self._client.connect() # type: ignore\n- response = self._client.send(self._login) # type: ignore\n- if response.code >= 2000: # type: ignore\n- self._client.close() # type: ignore\n- raise LoginError(response.msg) # type: ignore\n+ self._connect()\n except TransportError as err:\n message = \"_initialize_client failed to execute due to a connection error.\"\n logger.error(f\"{message} Error: {err}\")\n@@ -90,13 +93,33 @@\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n \n+ def _connect(self) -> None:\n+ \"\"\"Connects to EPP. Sends a login command. If an invalid response is returned,\n+ the client will be closed and a LoginError raised.\"\"\"\n+ self._client.connect() # type: ignore\n+ response = self._client.send(self._login) # type: ignore\n+ if response.code >= 2000: # type: ignore\n+ self._client.close() # type: ignore\n+ raise LoginError(response.msg) # type: ignore\n+\n def _disconnect(self) -> None:\n- \"\"\"Close the connection.\"\"\"\n+ \"\"\"Close the connection. Sends a logout command and closes the connection.\"\"\"\n+ self._send_logout_command()\n+ self._close_client()\n+\n+ def _send_logout_command(self):\n+ \"\"\"Sends a logout command to epp\"\"\"\n try:\n self._client.send(commands.Logout()) # type: ignore\n- self._client.close() # type: ignore\n- except Exception:\n- logger.warning(\"Connection to registry was not cleanly closed.\")\n+ except Exception as err:\n+ logger.warning(f\"Logout command not sent successfully: {err}\")\n+\n+ def _close_client(self):\n+ \"\"\"Closes an active client connection\"\"\"\n+ try:\n+ self._client.close()\n+ except Exception as err:\n+ logger.warning(f\"Connection to registry was not cleanly closed: {err}\")\n \n def _send(self, command):\n \"\"\"Helper function used by `send`.\"\"\"\n@@ -146,6 +169,8 @@\n cmd_type = command.__class__.__name__\n if not cleaned:\n raise ValueError(\"Please sanitize user input before sending it.\")\n+\n+ self.connection_lock.acquire()\n try:\n return self._send(command)\n except RegistryError as err:\n@@ -161,6 +186,8 @@\n return self._retry(command)\n else:\n raise err\n+ finally:\n+ self.connection_lock.release()\n \n \n try:\n", "issue": "Socket in use error when sending epp messages\n### Current Behavior\n\nOccasionally, we are unable to send messages via epp because of the following error:\r\n\"InfoDomain failed to execute due to an unknown error. 
Error: This socket is already used by another greenlet: <bound method Waiter.switch of <gevent._gevent_c_waiter.Waiter object at 0x7f8e1e5f6750>>\"\n\n### Expected Behavior\n\nNo error should occur.\n\n### Steps to Reproduce\n\nreproduction steps unknown, this happens irregularly and we haven't narrowed down a specif set of easily followed steps that can always guarantee this occurs. see additional context\r\n\n\n### Environment\n\n_No response_\n\n### Additional Context\n\nThis bug started after we added gunicorn using gevent instead of just the default sync option. While we still should be asynchronious this creates an issue with how we are using our sockets on epp calls. We were able to see this more with the removal of the connection pool. \n\n### Issue Links\n\n_No response_\n", "before_files": [{"content": "\"\"\"Provide a wrapper around epplib to handle authentication and errors.\"\"\"\n\nimport logging\n\ntry:\n from epplib.client import Client\n from epplib import commands\n from epplib.exceptions import TransportError, ParsingError\n from epplib.transport import SocketTransport\nexcept ImportError:\n pass\n\nfrom django.conf import settings\n\nfrom .cert import Cert, Key\nfrom .errors import ErrorCode, LoginError, RegistryError\n\nlogger = logging.getLogger(__name__)\n\ntry:\n # Write cert and key to disk\n CERT = Cert()\n KEY = Key()\nexcept Exception:\n CERT = None # type: ignore\n KEY = None # type: ignore\n logger.warning(\n \"Problem with client certificate. Registrar cannot contact registry.\",\n exc_info=True,\n )\n\n\nclass EPPLibWrapper:\n \"\"\"\n A wrapper over epplib's client.\n\n ATTN: This should not be used directly. Use `Domain` from domain.py.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize settings which will be used for all connections.\"\"\"\n # set _client to None initially. In the event that the __init__ fails\n # before _client initializes, app should still start and be in a state\n # that it can attempt _client initialization on send attempts\n self._client = None # type: ignore\n # prepare (but do not send) a Login command\n self._login = commands.Login(\n cl_id=settings.SECRET_REGISTRY_CL_ID,\n password=settings.SECRET_REGISTRY_PASSWORD,\n obj_uris=[\n \"urn:ietf:params:xml:ns:domain-1.0\",\n \"urn:ietf:params:xml:ns:contact-1.0\",\n ],\n )\n try:\n self._initialize_client()\n except Exception:\n logger.warning(\"Unable to configure epplib. Registrar cannot contact registry.\")\n\n def _initialize_client(self) -> None:\n \"\"\"Initialize a client, assuming _login defined. Sets _client to initialized\n client. 
Raises errors if initialization fails.\n This method will be called at app initialization, and also during retries.\"\"\"\n # establish a client object with a TCP socket transport\n # note that type: ignore added in several places because linter complains\n # about _client initially being set to None, and None type doesn't match code\n self._client = Client( # type: ignore\n SocketTransport(\n settings.SECRET_REGISTRY_HOSTNAME,\n cert_file=CERT.filename,\n key_file=KEY.filename,\n password=settings.SECRET_REGISTRY_KEY_PASSPHRASE,\n )\n )\n try:\n # use the _client object to connect\n self._client.connect() # type: ignore\n response = self._client.send(self._login) # type: ignore\n if response.code >= 2000: # type: ignore\n self._client.close() # type: ignore\n raise LoginError(response.msg) # type: ignore\n except TransportError as err:\n message = \"_initialize_client failed to execute due to a connection error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message, code=ErrorCode.TRANSPORT_ERROR) from err\n except LoginError as err:\n raise err\n except Exception as err:\n message = \"_initialize_client failed to execute due to an unknown error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n\n def _disconnect(self) -> None:\n \"\"\"Close the connection.\"\"\"\n try:\n self._client.send(commands.Logout()) # type: ignore\n self._client.close() # type: ignore\n except Exception:\n logger.warning(\"Connection to registry was not cleanly closed.\")\n\n def _send(self, command):\n \"\"\"Helper function used by `send`.\"\"\"\n cmd_type = command.__class__.__name__\n\n try:\n # check for the condition that the _client was not initialized properly\n # at app initialization\n if self._client is None:\n self._initialize_client()\n response = self._client.send(command)\n except (ValueError, ParsingError) as err:\n message = f\"{cmd_type} failed to execute due to some syntax error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n except TransportError as err:\n message = f\"{cmd_type} failed to execute due to a connection error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message, code=ErrorCode.TRANSPORT_ERROR) from err\n except LoginError as err:\n # For linter due to it not liking this line length\n text = \"failed to execute due to a registry login error.\"\n message = f\"{cmd_type} {text}\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n except Exception as err:\n message = f\"{cmd_type} failed to execute due to an unknown error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n else:\n if response.code >= 2000:\n raise RegistryError(response.msg, code=response.code)\n else:\n return response\n\n def _retry(self, command):\n \"\"\"Retry sending a command through EPP by re-initializing the client\n and then sending the command.\"\"\"\n # re-initialize by disconnecting and initial\n self._disconnect()\n self._initialize_client()\n return self._send(command)\n\n def send(self, command, *, cleaned=False):\n \"\"\"Login, the send the command. 
Retry once if an error is found\"\"\"\n # try to prevent use of this method without appropriate safeguards\n cmd_type = command.__class__.__name__\n if not cleaned:\n raise ValueError(\"Please sanitize user input before sending it.\")\n try:\n return self._send(command)\n except RegistryError as err:\n if (\n err.is_transport_error()\n or err.is_connection_error()\n or err.is_session_error()\n or err.is_server_error()\n or err.should_retry()\n ):\n message = f\"{cmd_type} failed and will be retried\"\n logger.info(f\"{message} Error: {err}\")\n return self._retry(command)\n else:\n raise err\n\n\ntry:\n # Initialize epplib\n CLIENT = EPPLibWrapper()\n logger.info(\"registry client initialized\")\nexcept Exception:\n logger.warning(\"Unable to configure epplib. Registrar cannot contact registry.\")\n", "path": "src/epplibwrapper/client.py"}], "after_files": [{"content": "\"\"\"Provide a wrapper around epplib to handle authentication and errors.\"\"\"\n\nimport logging\nfrom gevent.lock import BoundedSemaphore\n\ntry:\n from epplib.client import Client\n from epplib import commands\n from epplib.exceptions import TransportError, ParsingError\n from epplib.transport import SocketTransport\nexcept ImportError:\n pass\n\nfrom django.conf import settings\n\nfrom .cert import Cert, Key\nfrom .errors import ErrorCode, LoginError, RegistryError\n\nlogger = logging.getLogger(__name__)\n\ntry:\n # Write cert and key to disk\n CERT = Cert()\n KEY = Key()\nexcept Exception:\n CERT = None # type: ignore\n KEY = None # type: ignore\n logger.warning(\n \"Problem with client certificate. Registrar cannot contact registry.\",\n exc_info=True,\n )\n\n\nclass EPPLibWrapper:\n \"\"\"\n A wrapper over epplib's client.\n\n ATTN: This should not be used directly. Use `Domain` from domain.py.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize settings which will be used for all connections.\"\"\"\n # set _client to None initially. In the event that the __init__ fails\n # before _client initializes, app should still start and be in a state\n # that it can attempt _client initialization on send attempts\n self._client = None # type: ignore\n # prepare (but do not send) a Login command\n self._login = commands.Login(\n cl_id=settings.SECRET_REGISTRY_CL_ID,\n password=settings.SECRET_REGISTRY_PASSWORD,\n obj_uris=[\n \"urn:ietf:params:xml:ns:domain-1.0\",\n \"urn:ietf:params:xml:ns:contact-1.0\",\n ],\n )\n # We should only ever have one active connection at a time\n self.connection_lock = BoundedSemaphore(1)\n\n self.connection_lock.acquire()\n try:\n self._initialize_client()\n except Exception:\n logger.warning(\"Unable to configure the connection to the registry.\")\n finally:\n self.connection_lock.release()\n\n def _initialize_client(self) -> None:\n \"\"\"Initialize a client, assuming _login defined. Sets _client to initialized\n client. 
Raises errors if initialization fails.\n This method will be called at app initialization, and also during retries.\"\"\"\n # establish a client object with a TCP socket transport\n # note that type: ignore added in several places because linter complains\n # about _client initially being set to None, and None type doesn't match code\n self._client = Client( # type: ignore\n SocketTransport(\n settings.SECRET_REGISTRY_HOSTNAME,\n cert_file=CERT.filename,\n key_file=KEY.filename,\n password=settings.SECRET_REGISTRY_KEY_PASSPHRASE,\n )\n )\n try:\n # use the _client object to connect\n self._connect()\n except TransportError as err:\n message = \"_initialize_client failed to execute due to a connection error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message, code=ErrorCode.TRANSPORT_ERROR) from err\n except LoginError as err:\n raise err\n except Exception as err:\n message = \"_initialize_client failed to execute due to an unknown error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n\n def _connect(self) -> None:\n \"\"\"Connects to EPP. Sends a login command. If an invalid response is returned,\n the client will be closed and a LoginError raised.\"\"\"\n self._client.connect() # type: ignore\n response = self._client.send(self._login) # type: ignore\n if response.code >= 2000: # type: ignore\n self._client.close() # type: ignore\n raise LoginError(response.msg) # type: ignore\n\n def _disconnect(self) -> None:\n \"\"\"Close the connection. Sends a logout command and closes the connection.\"\"\"\n self._send_logout_command()\n self._close_client()\n\n def _send_logout_command(self):\n \"\"\"Sends a logout command to epp\"\"\"\n try:\n self._client.send(commands.Logout()) # type: ignore\n except Exception as err:\n logger.warning(f\"Logout command not sent successfully: {err}\")\n\n def _close_client(self):\n \"\"\"Closes an active client connection\"\"\"\n try:\n self._client.close()\n except Exception as err:\n logger.warning(f\"Connection to registry was not cleanly closed: {err}\")\n\n def _send(self, command):\n \"\"\"Helper function used by `send`.\"\"\"\n cmd_type = command.__class__.__name__\n\n try:\n # check for the condition that the _client was not initialized properly\n # at app initialization\n if self._client is None:\n self._initialize_client()\n response = self._client.send(command)\n except (ValueError, ParsingError) as err:\n message = f\"{cmd_type} failed to execute due to some syntax error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n except TransportError as err:\n message = f\"{cmd_type} failed to execute due to a connection error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message, code=ErrorCode.TRANSPORT_ERROR) from err\n except LoginError as err:\n # For linter due to it not liking this line length\n text = \"failed to execute due to a registry login error.\"\n message = f\"{cmd_type} {text}\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n except Exception as err:\n message = f\"{cmd_type} failed to execute due to an unknown error.\"\n logger.error(f\"{message} Error: {err}\")\n raise RegistryError(message) from err\n else:\n if response.code >= 2000:\n raise RegistryError(response.msg, code=response.code)\n else:\n return response\n\n def _retry(self, command):\n \"\"\"Retry sending a command through EPP by re-initializing the client\n and then sending the command.\"\"\"\n # re-initialize by 
disconnecting and initial\n self._disconnect()\n self._initialize_client()\n return self._send(command)\n\n def send(self, command, *, cleaned=False):\n \"\"\"Login, the send the command. Retry once if an error is found\"\"\"\n # try to prevent use of this method without appropriate safeguards\n cmd_type = command.__class__.__name__\n if not cleaned:\n raise ValueError(\"Please sanitize user input before sending it.\")\n\n self.connection_lock.acquire()\n try:\n return self._send(command)\n except RegistryError as err:\n if (\n err.is_transport_error()\n or err.is_connection_error()\n or err.is_session_error()\n or err.is_server_error()\n or err.should_retry()\n ):\n message = f\"{cmd_type} failed and will be retried\"\n logger.info(f\"{message} Error: {err}\")\n return self._retry(command)\n else:\n raise err\n finally:\n self.connection_lock.release()\n\n\ntry:\n # Initialize epplib\n CLIENT = EPPLibWrapper()\n logger.info(\"registry client initialized\")\nexcept Exception:\n logger.warning(\"Unable to configure epplib. Registrar cannot contact registry.\")\n", "path": "src/epplibwrapper/client.py"}]} | 2,288 | 860 |
gh_patches_debug_27880 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-5720 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/lazy/pretrained.py`
Content:
```
1 import os
2 from typing import Callable, Optional, Union
3
4 import torch
5 from torch.nn import Module
6
7 from colossalai.interface import pretrained as pretrained_interface
8
9
10 class PretrainedManager:
11 old_from_pretrained: Optional[Callable] = None
12
13 @staticmethod
14 def inject() -> None:
15 try:
16 from transformers.modeling_utils import PreTrainedModel
17 except ImportError:
18 return
19 # recover bound method to plain function
20 PretrainedManager.old_from_pretrained = PreTrainedModel.from_pretrained.__func__
21 PreTrainedModel.from_pretrained = new_from_pretrained
22
23 @staticmethod
24 def recover() -> None:
25 try:
26 from transformers.modeling_utils import PreTrainedModel
27 except ImportError:
28 return
29 # convert plain function to class method
30 PreTrainedModel.from_pretrained = classmethod(PretrainedManager.old_from_pretrained)
31 PretrainedManager.old_from_pretrained = None
32
33
34 @classmethod
35 def new_from_pretrained(
36 cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs
37 ) -> Module:
38 from transformers import GenerationConfig
39 from transformers.configuration_utils import PretrainedConfig
40 from transformers.modeling_utils import (
41 ContextManagers,
42 _add_variant,
43 cached_file,
44 download_url,
45 has_file,
46 is_offline_mode,
47 is_remote_url,
48 no_init_weights,
49 )
50 from transformers.utils import (
51 SAFE_WEIGHTS_INDEX_NAME,
52 SAFE_WEIGHTS_NAME,
53 WEIGHTS_INDEX_NAME,
54 WEIGHTS_NAME,
55 is_safetensors_available,
56 logging,
57 )
58
59 logger = logging.get_logger(__name__)
60
61 config = kwargs.pop("config", None)
62 cache_dir = kwargs.pop("cache_dir", None)
63 force_download = kwargs.pop("force_download", False)
64 resume_download = kwargs.pop("resume_download", False)
65 proxies = kwargs.pop("proxies", None)
66 local_files_only = kwargs.pop("local_files_only", False)
67 use_auth_token = kwargs.pop("use_auth_token", None)
68 revision = kwargs.pop("revision", None)
69 _ = kwargs.pop("mirror", None)
70 from_pipeline = kwargs.pop("_from_pipeline", None)
71 from_auto_class = kwargs.pop("_from_auto", False)
72 _fast_init = kwargs.pop("_fast_init", True)
73 torch_dtype = kwargs.pop("torch_dtype", None)
74 subfolder = kwargs.pop("subfolder", "")
75 commit_hash = kwargs.pop("_commit_hash", None)
76 variant = kwargs.pop("variant", None)
77 use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)
78
79 if len(kwargs) > 0:
80 logger.warning(f"Below kwargs may be ignored: {list(kwargs.keys())}")
81
82 from_pt = True
83
84 user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
85 if from_pipeline is not None:
86 user_agent["using_pipeline"] = from_pipeline
87
88 if is_offline_mode() and not local_files_only:
89 logger.info("Offline mode: forcing local_files_only=True")
90 local_files_only = True
91
92 # Load config if we don't provide a configuration
93 if not isinstance(config, PretrainedConfig):
94 config_path = config if config is not None else pretrained_model_name_or_path
95 config, model_kwargs = cls.config_class.from_pretrained(
96 config_path,
97 cache_dir=cache_dir,
98 return_unused_kwargs=True,
99 force_download=force_download,
100 resume_download=resume_download,
101 proxies=proxies,
102 local_files_only=local_files_only,
103 use_auth_token=use_auth_token,
104 revision=revision,
105 subfolder=subfolder,
106 _from_auto=from_auto_class,
107 _from_pipeline=from_pipeline,
108 **kwargs,
109 )
110 else:
111 model_kwargs = kwargs
112
113 if commit_hash is None:
114 commit_hash = getattr(config, "_commit_hash", None)
115
116 # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the
117 # index of the files.
118
119 if pretrained_model_name_or_path is not None:
120 pretrained_model_name_or_path = str(pretrained_model_name_or_path)
121 is_local = os.path.isdir(pretrained_model_name_or_path)
122 if is_local:
123 if use_safetensors is not False and os.path.isfile(
124 os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant))
125 ):
126 # Load from a safetensors checkpoint
127 archive_file = os.path.join(
128 pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)
129 )
130 elif use_safetensors is not False and os.path.isfile(
131 os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant))
132 ):
133 # Load from a sharded safetensors checkpoint
134 archive_file = os.path.join(
135 pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)
136 )
137 elif os.path.isfile(
138 os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant))
139 ):
140 # Load from a PyTorch checkpoint
141 archive_file = os.path.join(
142 pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)
143 )
144 elif os.path.isfile(
145 os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant))
146 ):
147 # Load from a sharded PyTorch checkpoint
148 archive_file = os.path.join(
149 pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)
150 )
151 else:
152 raise EnvironmentError(
153 f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory"
154 f" {pretrained_model_name_or_path}."
155 )
156 elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
157 archive_file = pretrained_model_name_or_path
158 is_local = True
159 elif is_remote_url(pretrained_model_name_or_path):
160 filename = pretrained_model_name_or_path
161 resolved_archive_file = download_url(pretrained_model_name_or_path)
162 else:
163 # set correct filename
164 if use_safetensors is not False:
165 filename = _add_variant(SAFE_WEIGHTS_NAME, variant)
166 else:
167 filename = _add_variant(WEIGHTS_NAME, variant)
168
169 try:
170 # Load from URL or cache if already cached
171 cached_file_kwargs = {
172 "cache_dir": cache_dir,
173 "force_download": force_download,
174 "proxies": proxies,
175 "resume_download": resume_download,
176 "local_files_only": local_files_only,
177 "use_auth_token": use_auth_token,
178 "user_agent": user_agent,
179 "revision": revision,
180 "subfolder": subfolder,
181 "_raise_exceptions_for_missing_entries": False,
182 "_commit_hash": commit_hash,
183 }
184 resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
185
186 # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None
187 # result when internet is up, the repo and revision exist, but the file does not.
188 if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant):
189 # Maybe the checkpoint is sharded, we try to grab the index name in this case.
190 resolved_archive_file = cached_file(
191 pretrained_model_name_or_path,
192 _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant),
193 **cached_file_kwargs,
194 )
195 if resolved_archive_file is not None:
196 pass
197 elif use_safetensors:
198 raise EnvironmentError(
199 f" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} and thus cannot be loaded with `safetensors`. Please make sure that the model has been saved with `safe_serialization=True` or do not set `use_safetensors=True`."
200 )
201 else:
202 # This repo has no safetensors file of any kind, we switch to PyTorch.
203 filename = _add_variant(WEIGHTS_NAME, variant)
204 resolved_archive_file = cached_file(
205 pretrained_model_name_or_path, filename, **cached_file_kwargs
206 )
207 if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant):
208 # Maybe the checkpoint is sharded, we try to grab the index name in this case.
209 resolved_archive_file = cached_file(
210 pretrained_model_name_or_path,
211 _add_variant(WEIGHTS_INDEX_NAME, variant),
212 **cached_file_kwargs,
213 )
214 if resolved_archive_file is not None:
215 pass
216 if resolved_archive_file is None:
217 # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error
218 # message.
219 has_file_kwargs = {
220 "revision": revision,
221 "proxies": proxies,
222 "use_auth_token": use_auth_token,
223 }
224 if variant is not None and has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):
225 raise EnvironmentError(
226 f"{pretrained_model_name_or_path} does not appear to have a file named"
227 f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant"
228 f" {variant}. Use `variant=None` to load this model from those weights."
229 )
230 else:
231 raise EnvironmentError(
232 f"{pretrained_model_name_or_path} does not appear to have a file named"
233 f" {_add_variant(WEIGHTS_NAME, variant)}"
234 )
235 except EnvironmentError:
236 # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted
237 # to the original exception.
238 raise
239 except Exception:
240 # For any other exception, we throw a generic error.
241 raise EnvironmentError(
242 f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it"
243 " from 'https://huggingface.co/models', make sure you don't have a local directory with the"
244 f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
245 f" directory containing a file named {_add_variant(WEIGHTS_NAME, variant)}."
246 )
247
248 if is_local:
249 logger.info(f"loading weights file {archive_file}")
250 resolved_archive_file = archive_file
251 else:
252 logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}")
253 else:
254 resolved_archive_file = None
255
256 if from_pt:
257 # set dtype to instantiate the model under:
258 # 1. If torch_dtype is not None, we use that dtype
259 dtype_orig = None
260
261 if torch_dtype is not None:
262 if not isinstance(torch_dtype, torch.dtype):
263 raise ValueError(f"`torch_dtype` can be either `torch.dtype` or `None`, but received {torch_dtype}")
264 dtype_orig = cls._set_default_torch_dtype(torch_dtype)
265
266 config.name_or_path = pretrained_model_name_or_path
267
268 # Instantiate model.
269 init_contexts = [no_init_weights(_enable=_fast_init)]
270
271 with ContextManagers(init_contexts):
272 model = cls(config, *model_args, **model_kwargs)
273
274 if from_pt:
275 # restore default dtype
276 if dtype_orig is not None:
277 torch.set_default_dtype(dtype_orig)
278
279 # make sure token embedding weights are still tied if needed
280 model.tie_weights()
281
282 # Set model in evaluation mode to deactivate DropOut modules by default
283 model.eval()
284
285 # If it is a model with generation capabilities, attempt to load the generation config
286 if model.can_generate():
287 try:
288 model.generation_config = GenerationConfig.from_pretrained(
289 pretrained_model_name_or_path,
290 cache_dir=cache_dir,
291 force_download=force_download,
292 resume_download=resume_download,
293 proxies=proxies,
294 local_files_only=local_files_only,
295 use_auth_token=use_auth_token,
296 revision=revision,
297 subfolder=subfolder,
298 _from_auto=from_auto_class,
299 _from_pipeline=from_pipeline,
300 **kwargs,
301 )
302 except (OSError, TypeError):
303 logger.info("Generation config file not found, using a generation config created from the model config.")
304
305 # set pretrained path
306 if resolved_archive_file:
307 pretrained_interface.set_pretrained_path(model, resolved_archive_file)
308
309 return model
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/lazy/pretrained.py b/colossalai/lazy/pretrained.py
--- a/colossalai/lazy/pretrained.py
+++ b/colossalai/lazy/pretrained.py
@@ -1,3 +1,4 @@
+import copy
import os
from typing import Callable, Optional, Union
@@ -74,6 +75,24 @@
subfolder = kwargs.pop("subfolder", "")
commit_hash = kwargs.pop("_commit_hash", None)
variant = kwargs.pop("variant", None)
+
+ kwargs.pop("state_dict", None)
+ kwargs.pop("from_tf", False)
+ kwargs.pop("from_flax", False)
+ kwargs.pop("output_loading_info", False)
+ kwargs.pop("trust_remote_code", None)
+ kwargs.pop("low_cpu_mem_usage", None)
+ kwargs.pop("device_map", None)
+ kwargs.pop("max_memory", None)
+ kwargs.pop("offload_folder", None)
+ kwargs.pop("offload_state_dict", False)
+ kwargs.pop("load_in_8bit", False)
+ kwargs.pop("load_in_4bit", False)
+ kwargs.pop("quantization_config", None)
+ kwargs.pop("adapter_kwargs", {})
+ kwargs.pop("adapter_name", "default")
+ kwargs.pop("use_flash_attention_2", False)
+
use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)
if len(kwargs) > 0:
@@ -108,6 +127,10 @@
**kwargs,
)
else:
+ config = copy.deepcopy(config)
+ kwarg_attn_imp = kwargs.pop("attn_implementation", None)
+ if kwarg_attn_imp is not None and config._attn_implementation != kwarg_attn_imp:
+ config._attn_implementation = kwarg_attn_imp
model_kwargs = kwargs
if commit_hash is None:
| {"golden_diff": "diff --git a/colossalai/lazy/pretrained.py b/colossalai/lazy/pretrained.py\n--- a/colossalai/lazy/pretrained.py\n+++ b/colossalai/lazy/pretrained.py\n@@ -1,3 +1,4 @@\n+import copy\n import os\n from typing import Callable, Optional, Union\n \n@@ -74,6 +75,24 @@\n subfolder = kwargs.pop(\"subfolder\", \"\")\n commit_hash = kwargs.pop(\"_commit_hash\", None)\n variant = kwargs.pop(\"variant\", None)\n+\n+ kwargs.pop(\"state_dict\", None)\n+ kwargs.pop(\"from_tf\", False)\n+ kwargs.pop(\"from_flax\", False)\n+ kwargs.pop(\"output_loading_info\", False)\n+ kwargs.pop(\"trust_remote_code\", None)\n+ kwargs.pop(\"low_cpu_mem_usage\", None)\n+ kwargs.pop(\"device_map\", None)\n+ kwargs.pop(\"max_memory\", None)\n+ kwargs.pop(\"offload_folder\", None)\n+ kwargs.pop(\"offload_state_dict\", False)\n+ kwargs.pop(\"load_in_8bit\", False)\n+ kwargs.pop(\"load_in_4bit\", False)\n+ kwargs.pop(\"quantization_config\", None)\n+ kwargs.pop(\"adapter_kwargs\", {})\n+ kwargs.pop(\"adapter_name\", \"default\")\n+ kwargs.pop(\"use_flash_attention_2\", False)\n+\n use_safetensors = kwargs.pop(\"use_safetensors\", None if is_safetensors_available() else False)\n \n if len(kwargs) > 0:\n@@ -108,6 +127,10 @@\n **kwargs,\n )\n else:\n+ config = copy.deepcopy(config)\n+ kwarg_attn_imp = kwargs.pop(\"attn_implementation\", None)\n+ if kwarg_attn_imp is not None and config._attn_implementation != kwarg_attn_imp:\n+ config._attn_implementation = kwarg_attn_imp\n model_kwargs = kwargs\n \n if commit_hash is None:\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import os\nfrom typing import Callable, Optional, Union\n\nimport torch\nfrom torch.nn import Module\n\nfrom colossalai.interface import pretrained as pretrained_interface\n\n\nclass PretrainedManager:\n old_from_pretrained: Optional[Callable] = None\n\n @staticmethod\n def inject() -> None:\n try:\n from transformers.modeling_utils import PreTrainedModel\n except ImportError:\n return\n # recover bound method to plain function\n PretrainedManager.old_from_pretrained = PreTrainedModel.from_pretrained.__func__\n PreTrainedModel.from_pretrained = new_from_pretrained\n\n @staticmethod\n def recover() -> None:\n try:\n from transformers.modeling_utils import PreTrainedModel\n except ImportError:\n return\n # convert plain function to class method\n PreTrainedModel.from_pretrained = classmethod(PretrainedManager.old_from_pretrained)\n PretrainedManager.old_from_pretrained = None\n\n\n@classmethod\ndef new_from_pretrained(\n cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs\n) -> Module:\n from transformers import GenerationConfig\n from transformers.configuration_utils import PretrainedConfig\n from transformers.modeling_utils import (\n ContextManagers,\n _add_variant,\n cached_file,\n download_url,\n has_file,\n is_offline_mode,\n is_remote_url,\n no_init_weights,\n )\n from transformers.utils import (\n SAFE_WEIGHTS_INDEX_NAME,\n SAFE_WEIGHTS_NAME,\n WEIGHTS_INDEX_NAME,\n WEIGHTS_NAME,\n is_safetensors_available,\n logging,\n )\n\n logger = logging.get_logger(__name__)\n\n config = kwargs.pop(\"config\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n use_auth_token = 
kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n from_pipeline = kwargs.pop(\"_from_pipeline\", None)\n from_auto_class = kwargs.pop(\"_from_auto\", False)\n _fast_init = kwargs.pop(\"_fast_init\", True)\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n subfolder = kwargs.pop(\"subfolder\", \"\")\n commit_hash = kwargs.pop(\"_commit_hash\", None)\n variant = kwargs.pop(\"variant\", None)\n use_safetensors = kwargs.pop(\"use_safetensors\", None if is_safetensors_available() else False)\n\n if len(kwargs) > 0:\n logger.warning(f\"Below kwargs may be ignored: {list(kwargs.keys())}\")\n\n from_pt = True\n\n user_agent = {\"file_type\": \"model\", \"framework\": \"pytorch\", \"from_auto_class\": from_auto_class}\n if from_pipeline is not None:\n user_agent[\"using_pipeline\"] = from_pipeline\n\n if is_offline_mode() and not local_files_only:\n logger.info(\"Offline mode: forcing local_files_only=True\")\n local_files_only = True\n\n # Load config if we don't provide a configuration\n if not isinstance(config, PretrainedConfig):\n config_path = config if config is not None else pretrained_model_name_or_path\n config, model_kwargs = cls.config_class.from_pretrained(\n config_path,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n _from_auto=from_auto_class,\n _from_pipeline=from_pipeline,\n **kwargs,\n )\n else:\n model_kwargs = kwargs\n\n if commit_hash is None:\n commit_hash = getattr(config, \"_commit_hash\", None)\n\n # This variable will flag if we're loading a sharded checkpoint. 
In this case the archive file is just the\n # index of the files.\n\n if pretrained_model_name_or_path is not None:\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n is_local = os.path.isdir(pretrained_model_name_or_path)\n if is_local:\n if use_safetensors is not False and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant))\n ):\n # Load from a safetensors checkpoint\n archive_file = os.path.join(\n pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)\n )\n elif use_safetensors is not False and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant))\n ):\n # Load from a sharded safetensors checkpoint\n archive_file = os.path.join(\n pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)\n )\n elif os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant))\n ):\n # Load from a PyTorch checkpoint\n archive_file = os.path.join(\n pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)\n )\n elif os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant))\n ):\n # Load from a sharded PyTorch checkpoint\n archive_file = os.path.join(\n pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)\n )\n else:\n raise EnvironmentError(\n f\"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory\"\n f\" {pretrained_model_name_or_path}.\"\n )\n elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):\n archive_file = pretrained_model_name_or_path\n is_local = True\n elif is_remote_url(pretrained_model_name_or_path):\n filename = pretrained_model_name_or_path\n resolved_archive_file = download_url(pretrained_model_name_or_path)\n else:\n # set correct filename\n if use_safetensors is not False:\n filename = _add_variant(SAFE_WEIGHTS_NAME, variant)\n else:\n filename = _add_variant(WEIGHTS_NAME, variant)\n\n try:\n # Load from URL or cache if already cached\n cached_file_kwargs = {\n \"cache_dir\": cache_dir,\n \"force_download\": force_download,\n \"proxies\": proxies,\n \"resume_download\": resume_download,\n \"local_files_only\": local_files_only,\n \"use_auth_token\": use_auth_token,\n \"user_agent\": user_agent,\n \"revision\": revision,\n \"subfolder\": subfolder,\n \"_raise_exceptions_for_missing_entries\": False,\n \"_commit_hash\": commit_hash,\n }\n resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)\n\n # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None\n # result when internet is up, the repo and revision exist, but the file does not.\n if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant):\n # Maybe the checkpoint is sharded, we try to grab the index name in this case.\n resolved_archive_file = cached_file(\n pretrained_model_name_or_path,\n _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant),\n **cached_file_kwargs,\n )\n if resolved_archive_file is not None:\n pass\n elif use_safetensors:\n raise EnvironmentError(\n f\" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} and thus cannot be loaded with `safetensors`. 
Please make sure that the model has been saved with `safe_serialization=True` or do not set `use_safetensors=True`.\"\n )\n else:\n # This repo has no safetensors file of any kind, we switch to PyTorch.\n filename = _add_variant(WEIGHTS_NAME, variant)\n resolved_archive_file = cached_file(\n pretrained_model_name_or_path, filename, **cached_file_kwargs\n )\n if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant):\n # Maybe the checkpoint is sharded, we try to grab the index name in this case.\n resolved_archive_file = cached_file(\n pretrained_model_name_or_path,\n _add_variant(WEIGHTS_INDEX_NAME, variant),\n **cached_file_kwargs,\n )\n if resolved_archive_file is not None:\n pass\n if resolved_archive_file is None:\n # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error\n # message.\n has_file_kwargs = {\n \"revision\": revision,\n \"proxies\": proxies,\n \"use_auth_token\": use_auth_token,\n }\n if variant is not None and has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named\"\n f\" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant\"\n f\" {variant}. Use `variant=None` to load this model from those weights.\"\n )\n else:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named\"\n f\" {_add_variant(WEIGHTS_NAME, variant)}\"\n )\n except EnvironmentError:\n # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted\n # to the original exception.\n raise\n except Exception:\n # For any other exception, we throw a generic error.\n raise EnvironmentError(\n f\"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it\"\n \" from 'https://huggingface.co/models', make sure you don't have a local directory with the\"\n f\" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a\"\n f\" directory containing a file named {_add_variant(WEIGHTS_NAME, variant)}.\"\n )\n\n if is_local:\n logger.info(f\"loading weights file {archive_file}\")\n resolved_archive_file = archive_file\n else:\n logger.info(f\"loading weights file {filename} from cache at {resolved_archive_file}\")\n else:\n resolved_archive_file = None\n\n if from_pt:\n # set dtype to instantiate the model under:\n # 1. 
If torch_dtype is not None, we use that dtype\n dtype_orig = None\n\n if torch_dtype is not None:\n if not isinstance(torch_dtype, torch.dtype):\n raise ValueError(f\"`torch_dtype` can be either `torch.dtype` or `None`, but received {torch_dtype}\")\n dtype_orig = cls._set_default_torch_dtype(torch_dtype)\n\n config.name_or_path = pretrained_model_name_or_path\n\n # Instantiate model.\n init_contexts = [no_init_weights(_enable=_fast_init)]\n\n with ContextManagers(init_contexts):\n model = cls(config, *model_args, **model_kwargs)\n\n if from_pt:\n # restore default dtype\n if dtype_orig is not None:\n torch.set_default_dtype(dtype_orig)\n\n # make sure token embedding weights are still tied if needed\n model.tie_weights()\n\n # Set model in evaluation mode to deactivate DropOut modules by default\n model.eval()\n\n # If it is a model with generation capabilities, attempt to load the generation config\n if model.can_generate():\n try:\n model.generation_config = GenerationConfig.from_pretrained(\n pretrained_model_name_or_path,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n _from_auto=from_auto_class,\n _from_pipeline=from_pipeline,\n **kwargs,\n )\n except (OSError, TypeError):\n logger.info(\"Generation config file not found, using a generation config created from the model config.\")\n\n # set pretrained path\n if resolved_archive_file:\n pretrained_interface.set_pretrained_path(model, resolved_archive_file)\n\n return model\n", "path": "colossalai/lazy/pretrained.py"}], "after_files": [{"content": "import copy\nimport os\nfrom typing import Callable, Optional, Union\n\nimport torch\nfrom torch.nn import Module\n\nfrom colossalai.interface import pretrained as pretrained_interface\n\n\nclass PretrainedManager:\n old_from_pretrained: Optional[Callable] = None\n\n @staticmethod\n def inject() -> None:\n try:\n from transformers.modeling_utils import PreTrainedModel\n except ImportError:\n return\n # recover bound method to plain function\n PretrainedManager.old_from_pretrained = PreTrainedModel.from_pretrained.__func__\n PreTrainedModel.from_pretrained = new_from_pretrained\n\n @staticmethod\n def recover() -> None:\n try:\n from transformers.modeling_utils import PreTrainedModel\n except ImportError:\n return\n # convert plain function to class method\n PreTrainedModel.from_pretrained = classmethod(PretrainedManager.old_from_pretrained)\n PretrainedManager.old_from_pretrained = None\n\n\n@classmethod\ndef new_from_pretrained(\n cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs\n) -> Module:\n from transformers import GenerationConfig\n from transformers.configuration_utils import PretrainedConfig\n from transformers.modeling_utils import (\n ContextManagers,\n _add_variant,\n cached_file,\n download_url,\n has_file,\n is_offline_mode,\n is_remote_url,\n no_init_weights,\n )\n from transformers.utils import (\n SAFE_WEIGHTS_INDEX_NAME,\n SAFE_WEIGHTS_NAME,\n WEIGHTS_INDEX_NAME,\n WEIGHTS_NAME,\n is_safetensors_available,\n logging,\n )\n\n logger = logging.get_logger(__name__)\n\n config = kwargs.pop(\"config\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", 
False)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n from_pipeline = kwargs.pop(\"_from_pipeline\", None)\n from_auto_class = kwargs.pop(\"_from_auto\", False)\n _fast_init = kwargs.pop(\"_fast_init\", True)\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n subfolder = kwargs.pop(\"subfolder\", \"\")\n commit_hash = kwargs.pop(\"_commit_hash\", None)\n variant = kwargs.pop(\"variant\", None)\n\n kwargs.pop(\"state_dict\", None)\n kwargs.pop(\"from_tf\", False)\n kwargs.pop(\"from_flax\", False)\n kwargs.pop(\"output_loading_info\", False)\n kwargs.pop(\"trust_remote_code\", None)\n kwargs.pop(\"low_cpu_mem_usage\", None)\n kwargs.pop(\"device_map\", None)\n kwargs.pop(\"max_memory\", None)\n kwargs.pop(\"offload_folder\", None)\n kwargs.pop(\"offload_state_dict\", False)\n kwargs.pop(\"load_in_8bit\", False)\n kwargs.pop(\"load_in_4bit\", False)\n kwargs.pop(\"quantization_config\", None)\n kwargs.pop(\"adapter_kwargs\", {})\n kwargs.pop(\"adapter_name\", \"default\")\n kwargs.pop(\"use_flash_attention_2\", False)\n\n use_safetensors = kwargs.pop(\"use_safetensors\", None if is_safetensors_available() else False)\n\n if len(kwargs) > 0:\n logger.warning(f\"Below kwargs may be ignored: {list(kwargs.keys())}\")\n\n from_pt = True\n\n user_agent = {\"file_type\": \"model\", \"framework\": \"pytorch\", \"from_auto_class\": from_auto_class}\n if from_pipeline is not None:\n user_agent[\"using_pipeline\"] = from_pipeline\n\n if is_offline_mode() and not local_files_only:\n logger.info(\"Offline mode: forcing local_files_only=True\")\n local_files_only = True\n\n # Load config if we don't provide a configuration\n if not isinstance(config, PretrainedConfig):\n config_path = config if config is not None else pretrained_model_name_or_path\n config, model_kwargs = cls.config_class.from_pretrained(\n config_path,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n _from_auto=from_auto_class,\n _from_pipeline=from_pipeline,\n **kwargs,\n )\n else:\n config = copy.deepcopy(config)\n kwarg_attn_imp = kwargs.pop(\"attn_implementation\", None)\n if kwarg_attn_imp is not None and config._attn_implementation != kwarg_attn_imp:\n config._attn_implementation = kwarg_attn_imp\n model_kwargs = kwargs\n\n if commit_hash is None:\n commit_hash = getattr(config, \"_commit_hash\", None)\n\n # This variable will flag if we're loading a sharded checkpoint. 
In this case the archive file is just the\n # index of the files.\n\n if pretrained_model_name_or_path is not None:\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n is_local = os.path.isdir(pretrained_model_name_or_path)\n if is_local:\n if use_safetensors is not False and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant))\n ):\n # Load from a safetensors checkpoint\n archive_file = os.path.join(\n pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)\n )\n elif use_safetensors is not False and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant))\n ):\n # Load from a sharded safetensors checkpoint\n archive_file = os.path.join(\n pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)\n )\n elif os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant))\n ):\n # Load from a PyTorch checkpoint\n archive_file = os.path.join(\n pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)\n )\n elif os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant))\n ):\n # Load from a sharded PyTorch checkpoint\n archive_file = os.path.join(\n pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)\n )\n else:\n raise EnvironmentError(\n f\"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory\"\n f\" {pretrained_model_name_or_path}.\"\n )\n elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):\n archive_file = pretrained_model_name_or_path\n is_local = True\n elif is_remote_url(pretrained_model_name_or_path):\n filename = pretrained_model_name_or_path\n resolved_archive_file = download_url(pretrained_model_name_or_path)\n else:\n # set correct filename\n if use_safetensors is not False:\n filename = _add_variant(SAFE_WEIGHTS_NAME, variant)\n else:\n filename = _add_variant(WEIGHTS_NAME, variant)\n\n try:\n # Load from URL or cache if already cached\n cached_file_kwargs = {\n \"cache_dir\": cache_dir,\n \"force_download\": force_download,\n \"proxies\": proxies,\n \"resume_download\": resume_download,\n \"local_files_only\": local_files_only,\n \"use_auth_token\": use_auth_token,\n \"user_agent\": user_agent,\n \"revision\": revision,\n \"subfolder\": subfolder,\n \"_raise_exceptions_for_missing_entries\": False,\n \"_commit_hash\": commit_hash,\n }\n resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)\n\n # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None\n # result when internet is up, the repo and revision exist, but the file does not.\n if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant):\n # Maybe the checkpoint is sharded, we try to grab the index name in this case.\n resolved_archive_file = cached_file(\n pretrained_model_name_or_path,\n _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant),\n **cached_file_kwargs,\n )\n if resolved_archive_file is not None:\n pass\n elif use_safetensors:\n raise EnvironmentError(\n f\" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} and thus cannot be loaded with `safetensors`. 
Please make sure that the model has been saved with `safe_serialization=True` or do not set `use_safetensors=True`.\"\n )\n else:\n # This repo has no safetensors file of any kind, we switch to PyTorch.\n filename = _add_variant(WEIGHTS_NAME, variant)\n resolved_archive_file = cached_file(\n pretrained_model_name_or_path, filename, **cached_file_kwargs\n )\n if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant):\n # Maybe the checkpoint is sharded, we try to grab the index name in this case.\n resolved_archive_file = cached_file(\n pretrained_model_name_or_path,\n _add_variant(WEIGHTS_INDEX_NAME, variant),\n **cached_file_kwargs,\n )\n if resolved_archive_file is not None:\n pass\n if resolved_archive_file is None:\n # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error\n # message.\n has_file_kwargs = {\n \"revision\": revision,\n \"proxies\": proxies,\n \"use_auth_token\": use_auth_token,\n }\n if variant is not None and has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named\"\n f\" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant\"\n f\" {variant}. Use `variant=None` to load this model from those weights.\"\n )\n else:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named\"\n f\" {_add_variant(WEIGHTS_NAME, variant)}\"\n )\n except EnvironmentError:\n # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted\n # to the original exception.\n raise\n except Exception:\n # For any other exception, we throw a generic error.\n raise EnvironmentError(\n f\"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it\"\n \" from 'https://huggingface.co/models', make sure you don't have a local directory with the\"\n f\" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a\"\n f\" directory containing a file named {_add_variant(WEIGHTS_NAME, variant)}.\"\n )\n\n if is_local:\n logger.info(f\"loading weights file {archive_file}\")\n resolved_archive_file = archive_file\n else:\n logger.info(f\"loading weights file {filename} from cache at {resolved_archive_file}\")\n else:\n resolved_archive_file = None\n\n if from_pt:\n # set dtype to instantiate the model under:\n # 1. 
If torch_dtype is not None, we use that dtype\n dtype_orig = None\n\n if torch_dtype is not None:\n if not isinstance(torch_dtype, torch.dtype):\n raise ValueError(f\"`torch_dtype` can be either `torch.dtype` or `None`, but received {torch_dtype}\")\n dtype_orig = cls._set_default_torch_dtype(torch_dtype)\n\n config.name_or_path = pretrained_model_name_or_path\n\n # Instantiate model.\n init_contexts = [no_init_weights(_enable=_fast_init)]\n\n with ContextManagers(init_contexts):\n model = cls(config, *model_args, **model_kwargs)\n\n if from_pt:\n # restore default dtype\n if dtype_orig is not None:\n torch.set_default_dtype(dtype_orig)\n\n # make sure token embedding weights are still tied if needed\n model.tie_weights()\n\n # Set model in evaluation mode to deactivate DropOut modules by default\n model.eval()\n\n # If it is a model with generation capabilities, attempt to load the generation config\n if model.can_generate():\n try:\n model.generation_config = GenerationConfig.from_pretrained(\n pretrained_model_name_or_path,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n _from_auto=from_auto_class,\n _from_pipeline=from_pipeline,\n **kwargs,\n )\n except (OSError, TypeError):\n logger.info(\"Generation config file not found, using a generation config created from the model config.\")\n\n # set pretrained path\n if resolved_archive_file:\n pretrained_interface.set_pretrained_path(model, resolved_archive_file)\n\n return model\n", "path": "colossalai/lazy/pretrained.py"}]} | 3,862 | 441 |
gh_patches_debug_36070 | rasdani/github-patches | git_diff | getnikola__nikola-2178 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The `IPython.nbconvert` package has been deprecated.
IPython 4.0 was just released (http://blog.jupyter.org/2015/08/12/first-release-of-jupyter/). The `IPython.nbconvert` package has been deprecated. The [`nbconvert`-based ipynb compiler](https://github.com/getnikola/nikola/blob/15217bc93f0af0d70ffe33e0ea067d81ddf32403/nikola/plugins/compile/ipynb.py) should be updated where necessary to reflect any API changes in the 4.0 release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/compile/ipynb.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2013-2015 Damián Avila, Chris Warrick and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Implementation of compile_html based on nbconvert."""
28
29 from __future__ import unicode_literals, print_function
30 import io
31 import os
32 import sys
33
34 try:
35 import IPython
36 from IPython.nbconvert.exporters import HTMLExporter
37 if IPython.version_info[0] >= 3: # API changed with 3.0.0
38 from IPython import nbformat
39 current_nbformat = nbformat.current_nbformat
40 from IPython.kernel import kernelspec
41 else:
42 import IPython.nbformat.current as nbformat
43 current_nbformat = 'json'
44 kernelspec = None
45
46 from IPython.config import Config
47 flag = True
48 except ImportError:
49 flag = None
50
51 from nikola.plugin_categories import PageCompiler
52 from nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER
53
54
55 class CompileIPynb(PageCompiler):
56 """Compile IPynb into HTML."""
57
58 name = "ipynb"
59 friendly_name = "Jupyter/IPython Notebook"
60 demote_headers = True
61 default_kernel = 'python2' if sys.version_info[0] == 2 else 'python3'
62
63 def set_site(self, site):
64 """Set Nikola site."""
65 self.logger = get_logger('compile_ipynb', STDERR_HANDLER)
66 super(CompileIPynb, self).set_site(site)
67
68 def compile_html_string(self, source, is_two_file=True):
69 """Export notebooks as HTML strings."""
70 if flag is None:
71 req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
72 HTMLExporter.default_template = 'basic'
73 c = Config(self.site.config['IPYNB_CONFIG'])
74 exportHtml = HTMLExporter(config=c)
75 with io.open(source, "r", encoding="utf8") as in_file:
76 nb_json = nbformat.read(in_file, current_nbformat)
77 (body, resources) = exportHtml.from_notebook_node(nb_json)
78 return body
79
80 def compile_html(self, source, dest, is_two_file=True):
81 """Compile source file into HTML and save as dest."""
82 makedirs(os.path.dirname(dest))
83 with io.open(dest, "w+", encoding="utf8") as out_file:
84 out_file.write(self.compile_html_string(source, is_two_file))
85
86 def read_metadata(self, post, file_metadata_regexp=None, unslugify_titles=False, lang=None):
87 """Read metadata directly from ipynb file.
88
89 As ipynb file support arbitrary metadata as json, the metadata used by Nikola
90 will be assume to be in the 'nikola' subfield.
91 """
92 if flag is None:
93 req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
94 source = post.source_path
95 with io.open(source, "r", encoding="utf8") as in_file:
96 nb_json = nbformat.read(in_file, current_nbformat)
97 # Metadata might not exist in two-file posts or in hand-crafted
98 # .ipynb files.
99 return nb_json.get('metadata', {}).get('nikola', {})
100
101 def create_post(self, path, **kw):
102 """Create a new post."""
103 if flag is None:
104 req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
105 content = kw.pop('content', None)
106 onefile = kw.pop('onefile', False)
107 kernel = kw.pop('ipython_kernel', None)
108 # is_page is not needed to create the file
109 kw.pop('is_page', False)
110
111 metadata = {}
112 metadata.update(self.default_metadata)
113 metadata.update(kw)
114
115 makedirs(os.path.dirname(path))
116
117 if content.startswith("{"):
118 # imported .ipynb file, guaranteed to start with "{" because it’s JSON.
119 nb = nbformat.reads(content, current_nbformat)
120 else:
121 if IPython.version_info[0] >= 3:
122 nb = nbformat.v4.new_notebook()
123 nb["cells"] = [nbformat.v4.new_markdown_cell(content)]
124 else:
125 nb = nbformat.new_notebook()
126 nb["worksheets"] = [nbformat.new_worksheet(cells=[nbformat.new_text_cell('markdown', [content])])]
127
128 if kernelspec is not None:
129 if kernel is None:
130 kernel = self.default_kernel
131 self.logger.notice('No kernel specified, assuming "{0}".'.format(kernel))
132
133 IPYNB_KERNELS = {}
134 ksm = kernelspec.KernelSpecManager()
135 for k in ksm.find_kernel_specs():
136 IPYNB_KERNELS[k] = ksm.get_kernel_spec(k).to_dict()
137 IPYNB_KERNELS[k]['name'] = k
138 del IPYNB_KERNELS[k]['argv']
139
140 if kernel not in IPYNB_KERNELS:
141 self.logger.error('Unknown kernel "{0}". Maybe you mispelled it?'.format(kernel))
142 self.logger.info("Available kernels: {0}".format(", ".join(sorted(IPYNB_KERNELS))))
143 raise Exception('Unknown kernel "{0}"'.format(kernel))
144
145 nb["metadata"]["kernelspec"] = IPYNB_KERNELS[kernel]
146 else:
147 # Older IPython versions don’t need kernelspecs.
148 pass
149
150 if onefile:
151 nb["metadata"]["nikola"] = metadata
152
153 with io.open(path, "w+", encoding="utf8") as fd:
154 if IPython.version_info[0] >= 3:
155 nbformat.write(nb, fd, 4)
156 else:
157 nbformat.write(nb, fd, 'ipynb')
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/compile/ipynb.py b/nikola/plugins/compile/ipynb.py
--- a/nikola/plugins/compile/ipynb.py
+++ b/nikola/plugins/compile/ipynb.py
@@ -32,21 +32,33 @@
import sys
try:
- import IPython
- from IPython.nbconvert.exporters import HTMLExporter
- if IPython.version_info[0] >= 3: # API changed with 3.0.0
- from IPython import nbformat
- current_nbformat = nbformat.current_nbformat
- from IPython.kernel import kernelspec
- else:
- import IPython.nbformat.current as nbformat
- current_nbformat = 'json'
- kernelspec = None
-
- from IPython.config import Config
+ from nbconvert.exporters import HTMLExporter
+ import nbformat
+ current_nbformat = nbformat.current_nbformat
+ from jupyter_client import kernelspec
+ from traitlets.config import Config
flag = True
+ ipy_modern = True
except ImportError:
- flag = None
+ try:
+ import IPython
+ from IPython.nbconvert.exporters import HTMLExporter
+ if IPython.version_info[0] >= 3: # API changed with 3.0.0
+ from IPython import nbformat
+ current_nbformat = nbformat.current_nbformat
+ from IPython.kernel import kernelspec
+ ipy_modern = True
+ else:
+ import IPython.nbformat.current as nbformat
+ current_nbformat = 'json'
+ kernelspec = None
+ ipy_modern = False
+
+ from IPython.config import Config
+ flag = True
+ except ImportError:
+ flag = None
+ ipy_modern = None
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER
@@ -118,7 +130,7 @@
# imported .ipynb file, guaranteed to start with "{" because it’s JSON.
nb = nbformat.reads(content, current_nbformat)
else:
- if IPython.version_info[0] >= 3:
+ if ipy_modern:
nb = nbformat.v4.new_notebook()
nb["cells"] = [nbformat.v4.new_markdown_cell(content)]
else:
@@ -151,7 +163,7 @@
nb["metadata"]["nikola"] = metadata
with io.open(path, "w+", encoding="utf8") as fd:
- if IPython.version_info[0] >= 3:
+ if ipy_modern:
nbformat.write(nb, fd, 4)
else:
nbformat.write(nb, fd, 'ipynb')
| {"golden_diff": "diff --git a/nikola/plugins/compile/ipynb.py b/nikola/plugins/compile/ipynb.py\n--- a/nikola/plugins/compile/ipynb.py\n+++ b/nikola/plugins/compile/ipynb.py\n@@ -32,21 +32,33 @@\n import sys\n \n try:\n- import IPython\n- from IPython.nbconvert.exporters import HTMLExporter\n- if IPython.version_info[0] >= 3: # API changed with 3.0.0\n- from IPython import nbformat\n- current_nbformat = nbformat.current_nbformat\n- from IPython.kernel import kernelspec\n- else:\n- import IPython.nbformat.current as nbformat\n- current_nbformat = 'json'\n- kernelspec = None\n-\n- from IPython.config import Config\n+ from nbconvert.exporters import HTMLExporter\n+ import nbformat\n+ current_nbformat = nbformat.current_nbformat\n+ from jupyter_client import kernelspec\n+ from traitlets.config import Config\n flag = True\n+ ipy_modern = True\n except ImportError:\n- flag = None\n+ try:\n+ import IPython\n+ from IPython.nbconvert.exporters import HTMLExporter\n+ if IPython.version_info[0] >= 3: # API changed with 3.0.0\n+ from IPython import nbformat\n+ current_nbformat = nbformat.current_nbformat\n+ from IPython.kernel import kernelspec\n+ ipy_modern = True\n+ else:\n+ import IPython.nbformat.current as nbformat\n+ current_nbformat = 'json'\n+ kernelspec = None\n+ ipy_modern = False\n+\n+ from IPython.config import Config\n+ flag = True\n+ except ImportError:\n+ flag = None\n+ ipy_modern = None\n \n from nikola.plugin_categories import PageCompiler\n from nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER\n@@ -118,7 +130,7 @@\n # imported .ipynb file, guaranteed to start with \"{\" because it\u2019s JSON.\n nb = nbformat.reads(content, current_nbformat)\n else:\n- if IPython.version_info[0] >= 3:\n+ if ipy_modern:\n nb = nbformat.v4.new_notebook()\n nb[\"cells\"] = [nbformat.v4.new_markdown_cell(content)]\n else:\n@@ -151,7 +163,7 @@\n nb[\"metadata\"][\"nikola\"] = metadata\n \n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n- if IPython.version_info[0] >= 3:\n+ if ipy_modern:\n nbformat.write(nb, fd, 4)\n else:\n nbformat.write(nb, fd, 'ipynb')\n", "issue": " The `IPython.nbconvert` package has been deprecated.\nIPython 4.0 was just released (http://blog.jupyter.org/2015/08/12/first-release-of-jupyter/). The `IPython.nbconvert` package has been deprecated. The [`nbconvert`-based ipynb compiler](https://github.com/getnikola/nikola/blob/15217bc93f0af0d70ffe33e0ea067d81ddf32403/nikola/plugins/compile/ipynb.py) should be updated where necessary to reflect any API changes in the 4.0 release.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2013-2015 Dami\u00e1n Avila, Chris Warrick and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
... [remainder of verification_info: full before/after contents of nikola/plugins/compile/ipynb.py, escaped JSON] | 2,271 | 646 |
gh_patches_debug_33032 | rasdani/github-patches | git_diff | projectmesa__mesa-1262 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove Sigma.js backend for network visualization
Reading #388, I conclude that Sigma.js is less featureful than D3.js. We should just stick to polishing the D3.js version.
--- END ISSUE ---
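To make the implication concrete, here is a minimal sketch of what a D3-only `NetworkModule` could look like once the Sigma.js branch is dropped. It reuses the `VisualizationElement` base class and `D3_JS_FILE` constant that appear in `mesa/visualization/modules/NetworkVisualization.py` below; everything else in the sketch is illustrative, not the project's accepted change.

```python
from mesa.visualization.ModularVisualization import VisualizationElement, D3_JS_FILE


class NetworkModule(VisualizationElement):
    """Render the model's network with d3.js only; no `library` switch."""

    package_includes = ["NetworkModule_d3.js", D3_JS_FILE]

    def __init__(self, portrayal_method, canvas_height=500, canvas_width=500):
        self.portrayal_method = portrayal_method
        self.canvas_height = canvas_height
        self.canvas_width = canvas_width
        # Same JS bootstrap as today, just without any backend selection logic.
        self.js_code = (
            f"elements.push(new NetworkModule({self.canvas_width}, {self.canvas_height}));"
        )

    def render(self, model):
        return self.portrayal_method(model.G)
```

Callers would then construct the module as `NetworkModule(network_portrayal, 500, 500)`, with no `library="sigma"` or `library="d3"` argument left to validate.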
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/virus_on_network/virus_on_network/server.py`
Content:
```
1 import math
2
3 from mesa.visualization.ModularVisualization import ModularServer
4 from mesa.visualization.UserParam import UserSettableParameter
5 from mesa.visualization.modules import ChartModule
6 from mesa.visualization.modules import NetworkModule
7 from mesa.visualization.modules import TextElement
8 from .model import VirusOnNetwork, State, number_infected
9
10
11 def network_portrayal(G):
12 # The model ensures there is always 1 agent per node
13
14 def node_color(agent):
15 return {State.INFECTED: "#FF0000", State.SUSCEPTIBLE: "#008000"}.get(
16 agent.state, "#808080"
17 )
18
19 def edge_color(agent1, agent2):
20 if State.RESISTANT in (agent1.state, agent2.state):
21 return "#000000"
22 return "#e8e8e8"
23
24 def edge_width(agent1, agent2):
25 if State.RESISTANT in (agent1.state, agent2.state):
26 return 3
27 return 2
28
29 def get_agents(source, target):
30 return G.nodes[source]["agent"][0], G.nodes[target]["agent"][0]
31
32 portrayal = dict()
33 portrayal["nodes"] = [
34 {
35 "size": 6,
36 "color": node_color(agents[0]),
37 "tooltip": f"id: {agents[0].unique_id}<br>state: {agents[0].state.name}",
38 }
39 for (_, agents) in G.nodes.data("agent")
40 ]
41
42 portrayal["edges"] = [
43 {
44 "source": source,
45 "target": target,
46 "color": edge_color(*get_agents(source, target)),
47 "width": edge_width(*get_agents(source, target)),
48 }
49 for (source, target) in G.edges
50 ]
51
52 return portrayal
53
54
55 network = NetworkModule(network_portrayal, 500, 500, library="d3")
56 chart = ChartModule(
57 [
58 {"Label": "Infected", "Color": "#FF0000"},
59 {"Label": "Susceptible", "Color": "#008000"},
60 {"Label": "Resistant", "Color": "#808080"},
61 ]
62 )
63
64
65 class MyTextElement(TextElement):
66 def render(self, model):
67 ratio = model.resistant_susceptible_ratio()
68 ratio_text = "∞" if ratio is math.inf else f"{ratio:.2f}"
69 infected_text = str(number_infected(model))
70
71 return "Resistant/Susceptible Ratio: {}<br>Infected Remaining: {}".format(
72 ratio_text, infected_text
73 )
74
75
76 model_params = {
77 "num_nodes": UserSettableParameter(
78 "slider",
79 "Number of agents",
80 10,
81 10,
82 100,
83 1,
84 description="Choose how many agents to include in the model",
85 ),
86 "avg_node_degree": UserSettableParameter(
87 "slider", "Avg Node Degree", 3, 3, 8, 1, description="Avg Node Degree"
88 ),
89 "initial_outbreak_size": UserSettableParameter(
90 "slider",
91 "Initial Outbreak Size",
92 1,
93 1,
94 10,
95 1,
96 description="Initial Outbreak Size",
97 ),
98 "virus_spread_chance": UserSettableParameter(
99 "slider",
100 "Virus Spread Chance",
101 0.4,
102 0.0,
103 1.0,
104 0.1,
105 description="Probability that susceptible neighbor will be infected",
106 ),
107 "virus_check_frequency": UserSettableParameter(
108 "slider",
109 "Virus Check Frequency",
110 0.4,
111 0.0,
112 1.0,
113 0.1,
114 description="Frequency the nodes check whether they are infected by " "a virus",
115 ),
116 "recovery_chance": UserSettableParameter(
117 "slider",
118 "Recovery Chance",
119 0.3,
120 0.0,
121 1.0,
122 0.1,
123 description="Probability that the virus will be removed",
124 ),
125 "gain_resistance_chance": UserSettableParameter(
126 "slider",
127 "Gain Resistance Chance",
128 0.5,
129 0.0,
130 1.0,
131 0.1,
132 description="Probability that a recovered agent will become "
133 "resistant to this virus in the future",
134 ),
135 }
136
137 server = ModularServer(
138 VirusOnNetwork, [network, MyTextElement(), chart], "Virus Model", model_params
139 )
140 server.port = 8521
141
```
Path: `examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py`
Content:
```
1 from mesa.visualization.ModularVisualization import ModularServer
2 from mesa.visualization.UserParam import UserSettableParameter
3 from mesa.visualization.modules import ChartModule
4 from mesa.visualization.modules import NetworkModule
5 from .model import BoltzmannWealthModelNetwork
6
7
8 def network_portrayal(G):
9 # The model ensures there is 0 or 1 agent per node
10
11 portrayal = dict()
12 portrayal["nodes"] = [
13 {
14 "id": node_id,
15 "size": 3 if agents else 1,
16 "color": "#CC0000" if not agents or agents[0].wealth == 0 else "#007959",
17 "label": None
18 if not agents
19 else f"Agent:{agents[0].unique_id} Wealth:{agents[0].wealth}",
20 }
21 for (node_id, agents) in G.nodes.data("agent")
22 ]
23
24 portrayal["edges"] = [
25 {"id": edge_id, "source": source, "target": target, "color": "#000000"}
26 for edge_id, (source, target) in enumerate(G.edges)
27 ]
28
29 return portrayal
30
31
32 grid = NetworkModule(network_portrayal, 500, 500, library="sigma")
33 chart = ChartModule(
34 [{"Label": "Gini", "Color": "Black"}], data_collector_name="datacollector"
35 )
36
37 model_params = {
38 "num_agents": UserSettableParameter(
39 "slider",
40 "Number of agents",
41 7,
42 2,
43 10,
44 1,
45 description="Choose how many agents to include in the model",
46 ),
47 "num_nodes": UserSettableParameter(
48 "slider",
49 "Number of nodes",
50 10,
51 3,
52 12,
53 1,
54 description="Choose how many nodes to include in the model, with at "
55 "least the same number of agents",
56 ),
57 }
58
59 server = ModularServer(
60 BoltzmannWealthModelNetwork, [grid, chart], "Money Model", model_params
61 )
62 server.port = 8521
63
```
Path: `mesa/visualization/modules/NetworkVisualization.py`
Content:
```
1 """
2 Network Visualization Module
3 ============
4
5 Module for rendering the network, using [sigma.js](http://sigmajs.org/) or [d3.js](https://d3js.org/) frameworks.
6
7 """
8 from mesa.visualization.ModularVisualization import VisualizationElement, D3_JS_FILE
9
10
11 class NetworkModule(VisualizationElement):
12 package_includes = []
13
14 def __init__(
15 self, portrayal_method, canvas_height=500, canvas_width=500, library="sigma"
16 ):
17 library_types = ["sigma", "d3"]
18 if library not in library_types:
19 raise ValueError(
20 f"Invalid javascript library type. Expected one of: {library_types}"
21 )
22
23 NetworkModule.package_includes = (
24 ["NetworkModule_sigma.js", "sigma.min.js"]
25 if library == "sigma"
26 else ["NetworkModule_d3.js", D3_JS_FILE]
27 )
28
29 self.portrayal_method = portrayal_method
30 self.canvas_height = canvas_height
31 self.canvas_width = canvas_width
32 new_element = f"new NetworkModule({self.canvas_width}, {self.canvas_height})"
33 self.js_code = "elements.push(" + new_element + ");"
34
35 def render(self, model):
36 return self.portrayal_method(model.G)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py
--- a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py
+++ b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py
@@ -29,7 +29,7 @@
return portrayal
-grid = NetworkModule(network_portrayal, 500, 500, library="sigma")
+grid = NetworkModule(network_portrayal, 500, 500)
chart = ChartModule(
[{"Label": "Gini", "Color": "Black"}], data_collector_name="datacollector"
)
diff --git a/examples/virus_on_network/virus_on_network/server.py b/examples/virus_on_network/virus_on_network/server.py
--- a/examples/virus_on_network/virus_on_network/server.py
+++ b/examples/virus_on_network/virus_on_network/server.py
@@ -52,7 +52,7 @@
return portrayal
-network = NetworkModule(network_portrayal, 500, 500, library="d3")
+network = NetworkModule(network_portrayal, 500, 500)
chart = ChartModule(
[
{"Label": "Infected", "Color": "#FF0000"},
diff --git a/mesa/visualization/modules/NetworkVisualization.py b/mesa/visualization/modules/NetworkVisualization.py
--- a/mesa/visualization/modules/NetworkVisualization.py
+++ b/mesa/visualization/modules/NetworkVisualization.py
@@ -2,7 +2,7 @@
Network Visualization Module
============
-Module for rendering the network, using [sigma.js](http://sigmajs.org/) or [d3.js](https://d3js.org/) frameworks.
+Module for rendering the network, using [d3.js](https://d3js.org/) framework.
"""
from mesa.visualization.ModularVisualization import VisualizationElement, D3_JS_FILE
@@ -12,19 +12,12 @@
package_includes = []
def __init__(
- self, portrayal_method, canvas_height=500, canvas_width=500, library="sigma"
+ self,
+ portrayal_method,
+ canvas_height=500,
+ canvas_width=500,
):
- library_types = ["sigma", "d3"]
- if library not in library_types:
- raise ValueError(
- f"Invalid javascript library type. Expected one of: {library_types}"
- )
-
- NetworkModule.package_includes = (
- ["NetworkModule_sigma.js", "sigma.min.js"]
- if library == "sigma"
- else ["NetworkModule_d3.js", D3_JS_FILE]
- )
+ NetworkModule.package_includes = ["NetworkModule_d3.js", D3_JS_FILE]
self.portrayal_method = portrayal_method
self.canvas_height = canvas_height
| {"golden_diff": "diff --git a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py\n--- a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py\n+++ b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py\n@@ -29,7 +29,7 @@\n return portrayal\n \n \n-grid = NetworkModule(network_portrayal, 500, 500, library=\"sigma\")\n+grid = NetworkModule(network_portrayal, 500, 500)\n chart = ChartModule(\n [{\"Label\": \"Gini\", \"Color\": \"Black\"}], data_collector_name=\"datacollector\"\n )\ndiff --git a/examples/virus_on_network/virus_on_network/server.py b/examples/virus_on_network/virus_on_network/server.py\n--- a/examples/virus_on_network/virus_on_network/server.py\n+++ b/examples/virus_on_network/virus_on_network/server.py\n@@ -52,7 +52,7 @@\n return portrayal\n \n \n-network = NetworkModule(network_portrayal, 500, 500, library=\"d3\")\n+network = NetworkModule(network_portrayal, 500, 500)\n chart = ChartModule(\n [\n {\"Label\": \"Infected\", \"Color\": \"#FF0000\"},\ndiff --git a/mesa/visualization/modules/NetworkVisualization.py b/mesa/visualization/modules/NetworkVisualization.py\n--- a/mesa/visualization/modules/NetworkVisualization.py\n+++ b/mesa/visualization/modules/NetworkVisualization.py\n@@ -2,7 +2,7 @@\n Network Visualization Module\n ============\n \n-Module for rendering the network, using [sigma.js](http://sigmajs.org/) or [d3.js](https://d3js.org/) frameworks.\n+Module for rendering the network, using [d3.js](https://d3js.org/) framework.\n \n \"\"\"\n from mesa.visualization.ModularVisualization import VisualizationElement, D3_JS_FILE\n@@ -12,19 +12,12 @@\n package_includes = []\n \n def __init__(\n- self, portrayal_method, canvas_height=500, canvas_width=500, library=\"sigma\"\n+ self,\n+ portrayal_method,\n+ canvas_height=500,\n+ canvas_width=500,\n ):\n- library_types = [\"sigma\", \"d3\"]\n- if library not in library_types:\n- raise ValueError(\n- f\"Invalid javascript library type. Expected one of: {library_types}\"\n- )\n-\n- NetworkModule.package_includes = (\n- [\"NetworkModule_sigma.js\", \"sigma.min.js\"]\n- if library == \"sigma\"\n- else [\"NetworkModule_d3.js\", D3_JS_FILE]\n- )\n+ NetworkModule.package_includes = [\"NetworkModule_d3.js\", D3_JS_FILE]\n \n self.portrayal_method = portrayal_method\n self.canvas_height = canvas_height\n", "issue": "Remove Sigma.js backend for network visualization\nReading #388, I conclude that Sigma.js is less featureful than D3.js. 
We should just stick to polishing the D3.js version.\n", "before_files": [{"content": "import math\n\nfrom mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.UserParam import UserSettableParameter\nfrom mesa.visualization.modules import ChartModule\nfrom mesa.visualization.modules import NetworkModule\nfrom mesa.visualization.modules import TextElement\nfrom .model import VirusOnNetwork, State, number_infected\n\n\ndef network_portrayal(G):\n # The model ensures there is always 1 agent per node\n\n def node_color(agent):\n return {State.INFECTED: \"#FF0000\", State.SUSCEPTIBLE: \"#008000\"}.get(\n agent.state, \"#808080\"\n )\n\n def edge_color(agent1, agent2):\n if State.RESISTANT in (agent1.state, agent2.state):\n return \"#000000\"\n return \"#e8e8e8\"\n\n def edge_width(agent1, agent2):\n if State.RESISTANT in (agent1.state, agent2.state):\n return 3\n return 2\n\n def get_agents(source, target):\n return G.nodes[source][\"agent\"][0], G.nodes[target][\"agent\"][0]\n\n portrayal = dict()\n portrayal[\"nodes\"] = [\n {\n \"size\": 6,\n \"color\": node_color(agents[0]),\n \"tooltip\": f\"id: {agents[0].unique_id}<br>state: {agents[0].state.name}\",\n }\n for (_, agents) in G.nodes.data(\"agent\")\n ]\n\n portrayal[\"edges\"] = [\n {\n \"source\": source,\n \"target\": target,\n \"color\": edge_color(*get_agents(source, target)),\n \"width\": edge_width(*get_agents(source, target)),\n }\n for (source, target) in G.edges\n ]\n\n return portrayal\n\n\nnetwork = NetworkModule(network_portrayal, 500, 500, library=\"d3\")\nchart = ChartModule(\n [\n {\"Label\": \"Infected\", \"Color\": \"#FF0000\"},\n {\"Label\": \"Susceptible\", \"Color\": \"#008000\"},\n {\"Label\": \"Resistant\", \"Color\": \"#808080\"},\n ]\n)\n\n\nclass MyTextElement(TextElement):\n def render(self, model):\n ratio = model.resistant_susceptible_ratio()\n ratio_text = \"∞\" if ratio is math.inf else f\"{ratio:.2f}\"\n infected_text = str(number_infected(model))\n\n return \"Resistant/Susceptible Ratio: {}<br>Infected Remaining: {}\".format(\n ratio_text, infected_text\n )\n\n\nmodel_params = {\n \"num_nodes\": UserSettableParameter(\n \"slider\",\n \"Number of agents\",\n 10,\n 10,\n 100,\n 1,\n description=\"Choose how many agents to include in the model\",\n ),\n \"avg_node_degree\": UserSettableParameter(\n \"slider\", \"Avg Node Degree\", 3, 3, 8, 1, description=\"Avg Node Degree\"\n ),\n \"initial_outbreak_size\": UserSettableParameter(\n \"slider\",\n \"Initial Outbreak Size\",\n 1,\n 1,\n 10,\n 1,\n description=\"Initial Outbreak Size\",\n ),\n \"virus_spread_chance\": UserSettableParameter(\n \"slider\",\n \"Virus Spread Chance\",\n 0.4,\n 0.0,\n 1.0,\n 0.1,\n description=\"Probability that susceptible neighbor will be infected\",\n ),\n \"virus_check_frequency\": UserSettableParameter(\n \"slider\",\n \"Virus Check Frequency\",\n 0.4,\n 0.0,\n 1.0,\n 0.1,\n description=\"Frequency the nodes check whether they are infected by \" \"a virus\",\n ),\n \"recovery_chance\": UserSettableParameter(\n \"slider\",\n \"Recovery Chance\",\n 0.3,\n 0.0,\n 1.0,\n 0.1,\n description=\"Probability that the virus will be removed\",\n ),\n \"gain_resistance_chance\": UserSettableParameter(\n \"slider\",\n \"Gain Resistance Chance\",\n 0.5,\n 0.0,\n 1.0,\n 0.1,\n description=\"Probability that a recovered agent will become \"\n \"resistant to this virus in the future\",\n ),\n}\n\nserver = ModularServer(\n VirusOnNetwork, [network, MyTextElement(), chart], \"Virus Model\", 
model_params\n)\nserver.port = 8521\n", "path": "examples/virus_on_network/virus_on_network/server.py"}, {"content": "from mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.UserParam import UserSettableParameter\nfrom mesa.visualization.modules import ChartModule\nfrom mesa.visualization.modules import NetworkModule\nfrom .model import BoltzmannWealthModelNetwork\n\n\ndef network_portrayal(G):\n # The model ensures there is 0 or 1 agent per node\n\n portrayal = dict()\n portrayal[\"nodes\"] = [\n {\n \"id\": node_id,\n \"size\": 3 if agents else 1,\n \"color\": \"#CC0000\" if not agents or agents[0].wealth == 0 else \"#007959\",\n \"label\": None\n if not agents\n else f\"Agent:{agents[0].unique_id} Wealth:{agents[0].wealth}\",\n }\n for (node_id, agents) in G.nodes.data(\"agent\")\n ]\n\n portrayal[\"edges\"] = [\n {\"id\": edge_id, \"source\": source, \"target\": target, \"color\": \"#000000\"}\n for edge_id, (source, target) in enumerate(G.edges)\n ]\n\n return portrayal\n\n\ngrid = NetworkModule(network_portrayal, 500, 500, library=\"sigma\")\nchart = ChartModule(\n [{\"Label\": \"Gini\", \"Color\": \"Black\"}], data_collector_name=\"datacollector\"\n)\n\nmodel_params = {\n \"num_agents\": UserSettableParameter(\n \"slider\",\n \"Number of agents\",\n 7,\n 2,\n 10,\n 1,\n description=\"Choose how many agents to include in the model\",\n ),\n \"num_nodes\": UserSettableParameter(\n \"slider\",\n \"Number of nodes\",\n 10,\n 3,\n 12,\n 1,\n description=\"Choose how many nodes to include in the model, with at \"\n \"least the same number of agents\",\n ),\n}\n\nserver = ModularServer(\n BoltzmannWealthModelNetwork, [grid, chart], \"Money Model\", model_params\n)\nserver.port = 8521\n", "path": "examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py"}, {"content": "\"\"\"\nNetwork Visualization Module\n============\n\nModule for rendering the network, using [sigma.js](http://sigmajs.org/) or [d3.js](https://d3js.org/) frameworks.\n\n\"\"\"\nfrom mesa.visualization.ModularVisualization import VisualizationElement, D3_JS_FILE\n\n\nclass NetworkModule(VisualizationElement):\n package_includes = []\n\n def __init__(\n self, portrayal_method, canvas_height=500, canvas_width=500, library=\"sigma\"\n ):\n library_types = [\"sigma\", \"d3\"]\n if library not in library_types:\n raise ValueError(\n f\"Invalid javascript library type. 
Expected one of: {library_types}\"\n )\n\n NetworkModule.package_includes = (\n [\"NetworkModule_sigma.js\", \"sigma.min.js\"]\n if library == \"sigma\"\n else [\"NetworkModule_d3.js\", D3_JS_FILE]\n )\n\n self.portrayal_method = portrayal_method\n self.canvas_height = canvas_height\n self.canvas_width = canvas_width\n new_element = f\"new NetworkModule({self.canvas_width}, {self.canvas_height})\"\n self.js_code = \"elements.push(\" + new_element + \");\"\n\n def render(self, model):\n return self.portrayal_method(model.G)\n", "path": "mesa/visualization/modules/NetworkVisualization.py"}], "after_files": [{"content": "import math\n\nfrom mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.UserParam import UserSettableParameter\nfrom mesa.visualization.modules import ChartModule\nfrom mesa.visualization.modules import NetworkModule\nfrom mesa.visualization.modules import TextElement\nfrom .model import VirusOnNetwork, State, number_infected\n\n\ndef network_portrayal(G):\n # The model ensures there is always 1 agent per node\n\n def node_color(agent):\n return {State.INFECTED: \"#FF0000\", State.SUSCEPTIBLE: \"#008000\"}.get(\n agent.state, \"#808080\"\n )\n\n def edge_color(agent1, agent2):\n if State.RESISTANT in (agent1.state, agent2.state):\n return \"#000000\"\n return \"#e8e8e8\"\n\n def edge_width(agent1, agent2):\n if State.RESISTANT in (agent1.state, agent2.state):\n return 3\n return 2\n\n def get_agents(source, target):\n return G.nodes[source][\"agent\"][0], G.nodes[target][\"agent\"][0]\n\n portrayal = dict()\n portrayal[\"nodes\"] = [\n {\n \"size\": 6,\n \"color\": node_color(agents[0]),\n \"tooltip\": f\"id: {agents[0].unique_id}<br>state: {agents[0].state.name}\",\n }\n for (_, agents) in G.nodes.data(\"agent\")\n ]\n\n portrayal[\"edges\"] = [\n {\n \"source\": source,\n \"target\": target,\n \"color\": edge_color(*get_agents(source, target)),\n \"width\": edge_width(*get_agents(source, target)),\n }\n for (source, target) in G.edges\n ]\n\n return portrayal\n\n\nnetwork = NetworkModule(network_portrayal, 500, 500)\nchart = ChartModule(\n [\n {\"Label\": \"Infected\", \"Color\": \"#FF0000\"},\n {\"Label\": \"Susceptible\", \"Color\": \"#008000\"},\n {\"Label\": \"Resistant\", \"Color\": \"#808080\"},\n ]\n)\n\n\nclass MyTextElement(TextElement):\n def render(self, model):\n ratio = model.resistant_susceptible_ratio()\n ratio_text = \"∞\" if ratio is math.inf else f\"{ratio:.2f}\"\n infected_text = str(number_infected(model))\n\n return \"Resistant/Susceptible Ratio: {}<br>Infected Remaining: {}\".format(\n ratio_text, infected_text\n )\n\n\nmodel_params = {\n \"num_nodes\": UserSettableParameter(\n \"slider\",\n \"Number of agents\",\n 10,\n 10,\n 100,\n 1,\n description=\"Choose how many agents to include in the model\",\n ),\n \"avg_node_degree\": UserSettableParameter(\n \"slider\", \"Avg Node Degree\", 3, 3, 8, 1, description=\"Avg Node Degree\"\n ),\n \"initial_outbreak_size\": UserSettableParameter(\n \"slider\",\n \"Initial Outbreak Size\",\n 1,\n 1,\n 10,\n 1,\n description=\"Initial Outbreak Size\",\n ),\n \"virus_spread_chance\": UserSettableParameter(\n \"slider\",\n \"Virus Spread Chance\",\n 0.4,\n 0.0,\n 1.0,\n 0.1,\n description=\"Probability that susceptible neighbor will be infected\",\n ),\n \"virus_check_frequency\": UserSettableParameter(\n \"slider\",\n \"Virus Check Frequency\",\n 0.4,\n 0.0,\n 1.0,\n 0.1,\n description=\"Frequency the nodes check whether they are infected by \" \"a virus\",\n ),\n 
\"recovery_chance\": UserSettableParameter(\n \"slider\",\n \"Recovery Chance\",\n 0.3,\n 0.0,\n 1.0,\n 0.1,\n description=\"Probability that the virus will be removed\",\n ),\n \"gain_resistance_chance\": UserSettableParameter(\n \"slider\",\n \"Gain Resistance Chance\",\n 0.5,\n 0.0,\n 1.0,\n 0.1,\n description=\"Probability that a recovered agent will become \"\n \"resistant to this virus in the future\",\n ),\n}\n\nserver = ModularServer(\n VirusOnNetwork, [network, MyTextElement(), chart], \"Virus Model\", model_params\n)\nserver.port = 8521\n", "path": "examples/virus_on_network/virus_on_network/server.py"}, {"content": "from mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.UserParam import UserSettableParameter\nfrom mesa.visualization.modules import ChartModule\nfrom mesa.visualization.modules import NetworkModule\nfrom .model import BoltzmannWealthModelNetwork\n\n\ndef network_portrayal(G):\n # The model ensures there is 0 or 1 agent per node\n\n portrayal = dict()\n portrayal[\"nodes\"] = [\n {\n \"id\": node_id,\n \"size\": 3 if agents else 1,\n \"color\": \"#CC0000\" if not agents or agents[0].wealth == 0 else \"#007959\",\n \"label\": None\n if not agents\n else f\"Agent:{agents[0].unique_id} Wealth:{agents[0].wealth}\",\n }\n for (node_id, agents) in G.nodes.data(\"agent\")\n ]\n\n portrayal[\"edges\"] = [\n {\"id\": edge_id, \"source\": source, \"target\": target, \"color\": \"#000000\"}\n for edge_id, (source, target) in enumerate(G.edges)\n ]\n\n return portrayal\n\n\ngrid = NetworkModule(network_portrayal, 500, 500)\nchart = ChartModule(\n [{\"Label\": \"Gini\", \"Color\": \"Black\"}], data_collector_name=\"datacollector\"\n)\n\nmodel_params = {\n \"num_agents\": UserSettableParameter(\n \"slider\",\n \"Number of agents\",\n 7,\n 2,\n 10,\n 1,\n description=\"Choose how many agents to include in the model\",\n ),\n \"num_nodes\": UserSettableParameter(\n \"slider\",\n \"Number of nodes\",\n 10,\n 3,\n 12,\n 1,\n description=\"Choose how many nodes to include in the model, with at \"\n \"least the same number of agents\",\n ),\n}\n\nserver = ModularServer(\n BoltzmannWealthModelNetwork, [grid, chart], \"Money Model\", model_params\n)\nserver.port = 8521\n", "path": "examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/server.py"}, {"content": "\"\"\"\nNetwork Visualization Module\n============\n\nModule for rendering the network, using [d3.js](https://d3js.org/) framework.\n\n\"\"\"\nfrom mesa.visualization.ModularVisualization import VisualizationElement, D3_JS_FILE\n\n\nclass NetworkModule(VisualizationElement):\n package_includes = []\n\n def __init__(\n self,\n portrayal_method,\n canvas_height=500,\n canvas_width=500,\n ):\n NetworkModule.package_includes = [\"NetworkModule_d3.js\", D3_JS_FILE]\n\n self.portrayal_method = portrayal_method\n self.canvas_height = canvas_height\n self.canvas_width = canvas_width\n new_element = f\"new NetworkModule({self.canvas_width}, {self.canvas_height})\"\n self.js_code = \"elements.push(\" + new_element + \");\"\n\n def render(self, model):\n return self.portrayal_method(model.G)\n", "path": "mesa/visualization/modules/NetworkVisualization.py"}]} | 2,608 | 671 |
gh_patches_debug_25965 | rasdani/github-patches | git_diff | facebookresearch__fairseq-4808 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[fairseq] Guard call to `shape_as_tensor` with `is_in_onnx_export()`
This is a no-op in eager and in ONNX export, but it's better for other
tracers if this is preserved as shapes directly instead of converted to
a tensor.
There is a little annoying code duplication with
`torch.jit.is_scripting()`, which is unfortunately necessary because we
didn't implement compile-time short-circuiting correctly in TorchScript
lol.
--- END ISSUE ---
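As a point of reference, the guard described above can be written as a small standalone helper. The helper name `input_shape` is invented for illustration; the calls themselves (`torch.jit.is_scripting`, `torch.onnx.is_in_onnx_export`, `torch.onnx.operators.shape_as_tensor`) are the public PyTorch APIs the issue refers to, and the two separate branches mirror the duplication it complains about.

```python
import torch
import torch.onnx.operators


def input_shape(x: torch.Tensor):
    # Scripting and ONNX export both fall back to a shape tensor; the branches
    # stay separate because TorchScript does not reliably short-circuit `or`
    # at compile time, which is the duplication the issue mentions.
    if torch.jit.is_scripting():
        return torch.onnx.operators.shape_as_tensor(x)
    elif torch.onnx.is_in_onnx_export():
        return torch.onnx.operators.shape_as_tensor(x)
    # Eager mode and other tracers keep the shape as a plain size, not a tensor.
    return x.size()
```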
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fairseq/modules/sinusoidal_positional_embedding.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import math
7 from typing import Any, Optional
8
9 import torch
10 import torch.onnx.operators
11 from fairseq import utils
12 from torch import Tensor, nn
13
14
15 class SinusoidalPositionalEmbedding(nn.Module):
16 """This module produces sinusoidal positional embeddings of any length.
17
18 Padding symbols are ignored.
19 """
20
21 def __init__(self, embedding_dim, padding_idx, init_size=1024):
22 super().__init__()
23 self.embedding_dim = embedding_dim
24 self.padding_idx = padding_idx if padding_idx is not None else 0
25 self.weights = SinusoidalPositionalEmbedding.get_embedding(
26 init_size, embedding_dim, padding_idx
27 )
28 self.onnx_trace = False
29 self.register_buffer("_float_tensor", torch.FloatTensor(1))
30 self.max_positions = int(1e5)
31
32 def prepare_for_onnx_export_(self):
33 self.onnx_trace = True
34
35 @staticmethod
36 def get_embedding(
37 num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
38 ):
39 """Build sinusoidal embeddings.
40
41 This matches the implementation in tensor2tensor, but differs slightly
42 from the description in Section 3.5 of "Attention Is All You Need".
43 """
44 half_dim = embedding_dim // 2
45 emb = math.log(10000) / (half_dim - 1)
46 emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
47 emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
48 1
49 ) * emb.unsqueeze(0)
50 emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
51 num_embeddings, -1
52 )
53 if embedding_dim % 2 == 1:
54 # zero pad
55 emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
56 if padding_idx is not None:
57 emb[padding_idx, :] = 0
58 return emb
59
60 def forward(
61 self,
62 input,
63 incremental_state: Optional[Any] = None,
64 timestep: Optional[Tensor] = None,
65 positions: Optional[Any] = None,
66 ):
67 """Input is expected to be of size [bsz x seqlen]."""
68 if torch.jit.is_scripting():
69 bspair = torch.onnx.operators.shape_as_tensor(input)
70 elif torch.onnx.is_in_onnx_export():
71 bspair = torch.onnx.operators.shape_as_tensor(input)
72 else:
73 bspair = input.size()
74 bsz, seq_len = bspair[0], bspair[1]
75 max_pos = self.padding_idx + 1 + seq_len
76 if self.weights is None or max_pos > self.weights.size(0):
77 # recompute/expand embeddings if needed
78 self.weights = SinusoidalPositionalEmbedding.get_embedding(
79 max_pos, self.embedding_dim, self.padding_idx
80 )
81 self.weights = self.weights.to(self._float_tensor)
82
83 if incremental_state is not None:
84 # positions is the same for every token when decoding a single step
85 pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
86 if self.onnx_trace:
87 return (
88 self.weights.index_select(index=self.padding_idx + pos, dim=0)
89 .unsqueeze(1)
90 .repeat(bsz, 1, 1)
91 )
92 return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
93
94 positions = utils.make_positions(
95 input, self.padding_idx, onnx_trace=self.onnx_trace
96 )
97 if self.onnx_trace:
98 flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
99 embedding_shape = torch.cat(
100 (bsz, seq_len, torch.tensor([-1], dtype=torch.long))
101 )
102 embeddings = torch.onnx.operators.reshape_from_tensor_shape(
103 flat_embeddings, embedding_shape
104 )
105 return embeddings
106 return (
107 self.weights.index_select(0, positions.view(-1))
108 .view(bsz, seq_len, -1)
109 .detach()
110 )
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fairseq/modules/sinusoidal_positional_embedding.py b/fairseq/modules/sinusoidal_positional_embedding.py
--- a/fairseq/modules/sinusoidal_positional_embedding.py
+++ b/fairseq/modules/sinusoidal_positional_embedding.py
@@ -65,12 +65,7 @@
positions: Optional[Any] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
- if torch.jit.is_scripting():
- bspair = torch.onnx.operators.shape_as_tensor(input)
- elif torch.onnx.is_in_onnx_export():
- bspair = torch.onnx.operators.shape_as_tensor(input)
- else:
- bspair = input.size()
+ bspair = torch.onnx.operators.shape_as_tensor(input)
bsz, seq_len = bspair[0], bspair[1]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
@@ -97,7 +92,7 @@
if self.onnx_trace:
flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
embedding_shape = torch.cat(
- (bsz, seq_len, torch.tensor([-1], dtype=torch.long))
+ (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
)
embeddings = torch.onnx.operators.reshape_from_tensor_shape(
flat_embeddings, embedding_shape
| {"golden_diff": "diff --git a/fairseq/modules/sinusoidal_positional_embedding.py b/fairseq/modules/sinusoidal_positional_embedding.py\n--- a/fairseq/modules/sinusoidal_positional_embedding.py\n+++ b/fairseq/modules/sinusoidal_positional_embedding.py\n@@ -65,12 +65,7 @@\n positions: Optional[Any] = None,\n ):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n- if torch.jit.is_scripting():\n- bspair = torch.onnx.operators.shape_as_tensor(input)\n- elif torch.onnx.is_in_onnx_export():\n- bspair = torch.onnx.operators.shape_as_tensor(input)\n- else:\n- bspair = input.size()\n+ bspair = torch.onnx.operators.shape_as_tensor(input)\n bsz, seq_len = bspair[0], bspair[1]\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n@@ -97,7 +92,7 @@\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n- (bsz, seq_len, torch.tensor([-1], dtype=torch.long))\n+ (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n", "issue": "[fairseq] Guard call to `shape_as_tensor` with `is_in_onnx_export()`\nThis is a no-op in eager and in ONNX export, but it's better for other\ntracers if this is preserved as shapes directly instead of converted to\na tensor.\n\nThere is a little annoying code duplication with\n`torch.jit.is_scripting()`, which is unforunately necessary because we\ndidn't implement compile-time short circuiting correctly in TorchScript\nlol.\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Any, Optional\n\nimport torch\nimport torch.onnx.operators\nfrom fairseq import utils\nfrom torch import Tensor, nn\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n\n Padding symbols are ignored.\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx, init_size=1024):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx if padding_idx is not None else 0\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n init_size, embedding_dim, padding_idx\n )\n self.onnx_trace = False\n self.register_buffer(\"_float_tensor\", torch.FloatTensor(1))\n self.max_positions = int(1e5)\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n @staticmethod\n def get_embedding(\n num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None\n ):\n \"\"\"Build sinusoidal embeddings.\n\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(\n 1\n ) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(\n num_embeddings, -1\n )\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n if padding_idx is not None:\n emb[padding_idx, :] = 0\n return emb\n\n def forward(\n self,\n input,\n incremental_state: Optional[Any] = None,\n timestep: 
Optional[Tensor] = None,\n positions: Optional[Any] = None,\n ):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n if torch.jit.is_scripting():\n bspair = torch.onnx.operators.shape_as_tensor(input)\n elif torch.onnx.is_in_onnx_export():\n bspair = torch.onnx.operators.shape_as_tensor(input)\n else:\n bspair = input.size()\n bsz, seq_len = bspair[0], bspair[1]\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n # recompute/expand embeddings if needed\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n max_pos, self.embedding_dim, self.padding_idx\n )\n self.weights = self.weights.to(self._float_tensor)\n\n if incremental_state is not None:\n # positions is the same for every token when decoding a single step\n pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len\n if self.onnx_trace:\n return (\n self.weights.index_select(index=self.padding_idx + pos, dim=0)\n .unsqueeze(1)\n .repeat(bsz, 1, 1)\n )\n return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)\n\n positions = utils.make_positions(\n input, self.padding_idx, onnx_trace=self.onnx_trace\n )\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n (bsz, seq_len, torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n )\n return embeddings\n return (\n self.weights.index_select(0, positions.view(-1))\n .view(bsz, seq_len, -1)\n .detach()\n )\n", "path": "fairseq/modules/sinusoidal_positional_embedding.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Any, Optional\n\nimport torch\nimport torch.onnx.operators\nfrom fairseq import utils\nfrom torch import Tensor, nn\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n\n Padding symbols are ignored.\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx, init_size=1024):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx if padding_idx is not None else 0\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n init_size, embedding_dim, padding_idx\n )\n self.onnx_trace = False\n self.register_buffer(\"_float_tensor\", torch.FloatTensor(1))\n self.max_positions = int(1e5)\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n @staticmethod\n def get_embedding(\n num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None\n ):\n \"\"\"Build sinusoidal embeddings.\n\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(\n 1\n ) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(\n num_embeddings, -1\n )\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n if padding_idx is not None:\n emb[padding_idx, :] = 0\n return emb\n\n def forward(\n self,\n input,\n incremental_state: 
Optional[Any] = None,\n timestep: Optional[Tensor] = None,\n positions: Optional[Any] = None,\n ):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bspair = torch.onnx.operators.shape_as_tensor(input)\n bsz, seq_len = bspair[0], bspair[1]\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n # recompute/expand embeddings if needed\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n max_pos, self.embedding_dim, self.padding_idx\n )\n self.weights = self.weights.to(self._float_tensor)\n\n if incremental_state is not None:\n # positions is the same for every token when decoding a single step\n pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len\n if self.onnx_trace:\n return (\n self.weights.index_select(index=self.padding_idx + pos, dim=0)\n .unsqueeze(1)\n .repeat(bsz, 1, 1)\n )\n return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)\n\n positions = utils.make_positions(\n input, self.padding_idx, onnx_trace=self.onnx_trace\n )\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n )\n return embeddings\n return (\n self.weights.index_select(0, positions.view(-1))\n .view(bsz, seq_len, -1)\n .detach()\n )\n", "path": "fairseq/modules/sinusoidal_positional_embedding.py"}]} | 1,537 | 338 |
gh_patches_debug_14196 | rasdani/github-patches | git_diff | mozilla__pontoon-2826 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pretranslation issues with multiline strings
Pretranslation struggles to translate Fluent strings where line breaks are used to limit the line width, as it treats each line as a separate sentence.
```
rec-pw-1-2 =
    Make this password unique and different from any others you use.
    A good strategy to follow is to combine two or more unrelated
    words to create an entire pass phrase, and include numbers and symbols.
```
I don't think we can completely ignore line breaks, because there are some cases where these have an effect on display ([example](https://searchfox.org/mozilla-central/rev/169bf38e150667afac81ab73ef8b5ace8f1dfa8d/browser/locales/en-US/browser/downloads.ftl#244-248)). But maybe we can strip line breaks (i.e. replace them with a single space) when sending the text to the machine translation engine, hoping that translators will catch edge cases when reviewing?
Cc @eemeli because that's a topic that we discussed at some point for the Fluent editor.
--- END ISSUE ---
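A minimal sketch of the workaround suggested above, using the string from the example and assuming its line breaks are purely cosmetic: replace each newline with a space before the text goes to the machine translation engine, so MT sees whole sentences instead of one fragment per line.

```python
# Hypothetical sketch, not Pontoon code: collapse wrapping newlines into spaces
# before querying the machine-translation backend.
source = (
    "Make this password unique and different from any others you use.\n"
    "A good strategy to follow is to combine two or more unrelated\n"
    "words to create an entire pass phrase, and include numbers and symbols."
)

mt_input = source.replace("\n", " ")
print(mt_input)
# -> one continuous sentence flow, suitable as MT input; reviewers still see
#    the original multiline value in the editor and can catch edge cases.
```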
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/pretranslation/pretranslate.py`
Content:
```
1 import logging
2 import operator
3 import re
4
5 from django.db.models import CharField, Value as V
6 from django.db.models.functions import Concat
7
8 from fluent.syntax import FluentParser, FluentSerializer
9 from functools import reduce
10
11 from pontoon.base.models import User, TranslatedResource
12 from pontoon.base.fluent import FlatTransformer, create_locale_plural_variants
13 from pontoon.machinery.utils import (
14 get_google_translate_data,
15 get_translation_memory_data,
16 )
17
18
19 log = logging.getLogger(__name__)
20
21 parser = FluentParser()
22 serializer = FluentSerializer()
23
24
25 class PretranslationTransformer(FlatTransformer):
26 def __init__(self, locale):
27 self.services = []
28 self.locale = locale
29
30 def visit_SelectExpression(self, node):
31 create_locale_plural_variants(node, self.locale)
32 return self.generic_visit(node)
33
34 def visit_TextElement(self, node):
35 pretranslation, service = get_pretranslated_data(node.value, self.locale)
36
37 if pretranslation is None:
38 raise ValueError(
39 f"Pretranslation for `{node.value}` to {self.locale.code} not available."
40 )
41
42 node.value = pretranslation
43 self.services.append(service)
44 return node
45
46
47 def get_pretranslations(entity, locale):
48 """
49 Get pretranslations for the entity-locale pair using internal translation memory and
50 Google's machine translation.
51
52 For Fluent strings, uplift SelectExpressions, serialize Placeables as TextElements
53 and then only pretranslate TextElements. Set the most frequent TextElement
54 pretranslation author as the author of the entire pretranslation.
55
56 :arg Entity entity: the Entity object
57 :arg Locale locale: the Locale object
58
59 :returns: a list of tuples, consisting of:
60 - a pretranslation of the entity
61 - a plural form
62 - a user (representing TM or GT service)
63 """
64 source = entity.string
65 services = {
66 "tm": User.objects.get(email="[email protected]"),
67 "gt": User.objects.get(email="[email protected]"),
68 }
69
70 if entity.resource.format == "ftl":
71 source_ast = parser.parse_entry(source)
72 pt_transformer = PretranslationTransformer(locale)
73
74 try:
75 pretranslated_ast = pt_transformer.visit(source_ast)
76 except ValueError as e:
77 log.info(f"Fluent pretranslation error: {e}")
78 return []
79
80 pretranslation = serializer.serialize_entry(pretranslated_ast)
81
82 authors = [services[service] for service in pt_transformer.services]
83 author = max(set(authors), key=authors.count) if authors else services["tm"]
84
85 return [(pretranslation, None, author)]
86
87 else:
88 pretranslation, service = get_pretranslated_data(source, locale)
89
90 if pretranslation is None:
91 return []
92
93 author = services[service]
94 if entity.string_plural == "":
95 return [(pretranslation, None, author)]
96 else:
97 plural_forms = range(0, locale.nplurals or 1)
98 return [
99 (pretranslation, plural_form, author) for plural_form in plural_forms
100 ]
101
102
103 def get_pretranslated_data(source, locale):
104 # Empty strings do not need translation
105 if re.search("^\\s*$", source):
106 return source, "tm"
107
108 # Try to get matches from Translation Memory
109 tm_response = get_translation_memory_data(text=source, locale=locale)
110 tm_perfect = [t for t in tm_response if int(t["quality"]) == 100]
111 if tm_perfect:
112 return tm_perfect[0]["target"], "tm"
113
114 # Fetch from Google Translate
115 elif locale.google_translate_code:
116 gt_response = get_google_translate_data(text=source, locale=locale)
117 if gt_response["status"]:
118 return gt_response["translation"], "gt"
119
120 return None, None
121
122
123 def update_changed_instances(tr_filter, tr_dict, translations):
124 """
125 Update the latest activity and stats for changed Locales, ProjectLocales
126 & TranslatedResources
127 """
128 tr_filter = tuple(tr_filter)
129 # Combine all generated filters with an OK operator.
130 # `operator.ior` is the '|' Python operator, which turns into a logical OR
131 # when used between django ORM query objects.
132 tr_query = reduce(operator.ior, tr_filter)
133
134 translatedresources = TranslatedResource.objects.filter(tr_query).annotate(
135 locale_resource=Concat(
136 "locale_id", V("-"), "resource_id", output_field=CharField()
137 )
138 )
139
140 translatedresources.update_stats()
141
142 for tr in translatedresources:
143 index = tr_dict[tr.locale_resource]
144 translation = translations[index]
145 translation.update_latest_translation()
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py
--- a/pontoon/pretranslation/pretranslate.py
+++ b/pontoon/pretranslation/pretranslate.py
@@ -32,11 +32,15 @@
return self.generic_visit(node)
def visit_TextElement(self, node):
- pretranslation, service = get_pretranslated_data(node.value, self.locale)
+ # Machine translation treats each line as separate sentence,
+ # hence we replace newline characters with spaces.
+ source = node.value.replace("\n", " ")
+
+ pretranslation, service = get_pretranslated_data(source, self.locale)
if pretranslation is None:
raise ValueError(
- f"Pretranslation for `{node.value}` to {self.locale.code} not available."
+ f"Pretranslation for `{source}` to {self.locale.code} not available."
)
node.value = pretranslation
| {"golden_diff": "diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py\n--- a/pontoon/pretranslation/pretranslate.py\n+++ b/pontoon/pretranslation/pretranslate.py\n@@ -32,11 +32,15 @@\n return self.generic_visit(node)\n \n def visit_TextElement(self, node):\n- pretranslation, service = get_pretranslated_data(node.value, self.locale)\n+ # Machine translation treats each line as separate sentence,\n+ # hence we replace newline characters with spaces.\n+ source = node.value.replace(\"\\n\", \" \")\n+\n+ pretranslation, service = get_pretranslated_data(source, self.locale)\n \n if pretranslation is None:\n raise ValueError(\n- f\"Pretranslation for `{node.value}` to {self.locale.code} not available.\"\n+ f\"Pretranslation for `{source}` to {self.locale.code} not available.\"\n )\n \n node.value = pretranslation\n", "issue": "Pretranslation issues with multiline strings\nPretranslation struggles to translate Fluent strings where line breaks are used to limit the line width, as it treats each line as a separate sentence.\r\n\r\n```\r\nrec-pw-1-2 =\r\n Make this password unique and different from any others you use.\r\n A good strategy to follow is to combine two or more unrelated\r\n words to create an entire pass phrase, and include numbers and symbols.\r\n```\r\n\r\nI don't think we can completely ignore line breaks, because there are some cases where these have an effect on display ([example](https://searchfox.org/mozilla-central/rev/169bf38e150667afac81ab73ef8b5ace8f1dfa8d/browser/locales/en-US/browser/downloads.ftl#244-248)). But maybe we can strip line breaks (i.e. replace them with a whitespace) when sending the translation to the machine translation engine, hoping that translators will catch edge cases when reviewing?\r\n\r\nCc @eemeli because that's a topic that we discussed at some point for the Fluent editor.\n", "before_files": [{"content": "import logging\nimport operator\nimport re\n\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\n\nfrom fluent.syntax import FluentParser, FluentSerializer\nfrom functools import reduce\n\nfrom pontoon.base.models import User, TranslatedResource\nfrom pontoon.base.fluent import FlatTransformer, create_locale_plural_variants\nfrom pontoon.machinery.utils import (\n get_google_translate_data,\n get_translation_memory_data,\n)\n\n\nlog = logging.getLogger(__name__)\n\nparser = FluentParser()\nserializer = FluentSerializer()\n\n\nclass PretranslationTransformer(FlatTransformer):\n def __init__(self, locale):\n self.services = []\n self.locale = locale\n\n def visit_SelectExpression(self, node):\n create_locale_plural_variants(node, self.locale)\n return self.generic_visit(node)\n\n def visit_TextElement(self, node):\n pretranslation, service = get_pretranslated_data(node.value, self.locale)\n\n if pretranslation is None:\n raise ValueError(\n f\"Pretranslation for `{node.value}` to {self.locale.code} not available.\"\n )\n\n node.value = pretranslation\n self.services.append(service)\n return node\n\n\ndef get_pretranslations(entity, locale):\n \"\"\"\n Get pretranslations for the entity-locale pair using internal translation memory and\n Google's machine translation.\n\n For Fluent strings, uplift SelectExpressions, serialize Placeables as TextElements\n and then only pretranslate TextElements. 
Set the most frequent TextElement\n pretranslation author as the author of the entire pretranslation.\n\n :arg Entity entity: the Entity object\n :arg Locale locale: the Locale object\n\n :returns: a list of tuples, consisting of:\n - a pretranslation of the entity\n - a plural form\n - a user (representing TM or GT service)\n \"\"\"\n source = entity.string\n services = {\n \"tm\": User.objects.get(email=\"[email protected]\"),\n \"gt\": User.objects.get(email=\"[email protected]\"),\n }\n\n if entity.resource.format == \"ftl\":\n source_ast = parser.parse_entry(source)\n pt_transformer = PretranslationTransformer(locale)\n\n try:\n pretranslated_ast = pt_transformer.visit(source_ast)\n except ValueError as e:\n log.info(f\"Fluent pretranslation error: {e}\")\n return []\n\n pretranslation = serializer.serialize_entry(pretranslated_ast)\n\n authors = [services[service] for service in pt_transformer.services]\n author = max(set(authors), key=authors.count) if authors else services[\"tm\"]\n\n return [(pretranslation, None, author)]\n\n else:\n pretranslation, service = get_pretranslated_data(source, locale)\n\n if pretranslation is None:\n return []\n\n author = services[service]\n if entity.string_plural == \"\":\n return [(pretranslation, None, author)]\n else:\n plural_forms = range(0, locale.nplurals or 1)\n return [\n (pretranslation, plural_form, author) for plural_form in plural_forms\n ]\n\n\ndef get_pretranslated_data(source, locale):\n # Empty strings do not need translation\n if re.search(\"^\\\\s*$\", source):\n return source, \"tm\"\n\n # Try to get matches from Translation Memory\n tm_response = get_translation_memory_data(text=source, locale=locale)\n tm_perfect = [t for t in tm_response if int(t[\"quality\"]) == 100]\n if tm_perfect:\n return tm_perfect[0][\"target\"], \"tm\"\n\n # Fetch from Google Translate\n elif locale.google_translate_code:\n gt_response = get_google_translate_data(text=source, locale=locale)\n if gt_response[\"status\"]:\n return gt_response[\"translation\"], \"gt\"\n\n return None, None\n\n\ndef update_changed_instances(tr_filter, tr_dict, translations):\n \"\"\"\n Update the latest activity and stats for changed Locales, ProjectLocales\n & TranslatedResources\n \"\"\"\n tr_filter = tuple(tr_filter)\n # Combine all generated filters with an OK operator.\n # `operator.ior` is the '|' Python operator, which turns into a logical OR\n # when used between django ORM query objects.\n tr_query = reduce(operator.ior, tr_filter)\n\n translatedresources = TranslatedResource.objects.filter(tr_query).annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n\n translatedresources.update_stats()\n\n for tr in translatedresources:\n index = tr_dict[tr.locale_resource]\n translation = translations[index]\n translation.update_latest_translation()\n", "path": "pontoon/pretranslation/pretranslate.py"}], "after_files": [{"content": "import logging\nimport operator\nimport re\n\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\n\nfrom fluent.syntax import FluentParser, FluentSerializer\nfrom functools import reduce\n\nfrom pontoon.base.models import User, TranslatedResource\nfrom pontoon.base.fluent import FlatTransformer, create_locale_plural_variants\nfrom pontoon.machinery.utils import (\n get_google_translate_data,\n get_translation_memory_data,\n)\n\n\nlog = logging.getLogger(__name__)\n\nparser = FluentParser()\nserializer = FluentSerializer()\n\n\nclass 
PretranslationTransformer(FlatTransformer):\n def __init__(self, locale):\n self.services = []\n self.locale = locale\n\n def visit_SelectExpression(self, node):\n create_locale_plural_variants(node, self.locale)\n return self.generic_visit(node)\n\n def visit_TextElement(self, node):\n # Machine translation treats each line as separate sentence,\n # hence we replace newline characters with spaces.\n source = node.value.replace(\"\\n\", \" \")\n\n pretranslation, service = get_pretranslated_data(source, self.locale)\n\n if pretranslation is None:\n raise ValueError(\n f\"Pretranslation for `{source}` to {self.locale.code} not available.\"\n )\n\n node.value = pretranslation\n self.services.append(service)\n return node\n\n\ndef get_pretranslations(entity, locale):\n \"\"\"\n Get pretranslations for the entity-locale pair using internal translation memory and\n Google's machine translation.\n\n For Fluent strings, uplift SelectExpressions, serialize Placeables as TextElements\n and then only pretranslate TextElements. Set the most frequent TextElement\n pretranslation author as the author of the entire pretranslation.\n\n :arg Entity entity: the Entity object\n :arg Locale locale: the Locale object\n\n :returns: a list of tuples, consisting of:\n - a pretranslation of the entity\n - a plural form\n - a user (representing TM or GT service)\n \"\"\"\n source = entity.string\n services = {\n \"tm\": User.objects.get(email=\"[email protected]\"),\n \"gt\": User.objects.get(email=\"[email protected]\"),\n }\n\n if entity.resource.format == \"ftl\":\n source_ast = parser.parse_entry(source)\n pt_transformer = PretranslationTransformer(locale)\n\n try:\n pretranslated_ast = pt_transformer.visit(source_ast)\n except ValueError as e:\n log.info(f\"Fluent pretranslation error: {e}\")\n return []\n\n pretranslation = serializer.serialize_entry(pretranslated_ast)\n\n authors = [services[service] for service in pt_transformer.services]\n author = max(set(authors), key=authors.count) if authors else services[\"tm\"]\n\n return [(pretranslation, None, author)]\n\n else:\n pretranslation, service = get_pretranslated_data(source, locale)\n\n if pretranslation is None:\n return []\n\n author = services[service]\n if entity.string_plural == \"\":\n return [(pretranslation, None, author)]\n else:\n plural_forms = range(0, locale.nplurals or 1)\n return [\n (pretranslation, plural_form, author) for plural_form in plural_forms\n ]\n\n\ndef get_pretranslated_data(source, locale):\n # Empty strings do not need translation\n if re.search(\"^\\\\s*$\", source):\n return source, \"tm\"\n\n # Try to get matches from Translation Memory\n tm_response = get_translation_memory_data(text=source, locale=locale)\n tm_perfect = [t for t in tm_response if int(t[\"quality\"]) == 100]\n if tm_perfect:\n return tm_perfect[0][\"target\"], \"tm\"\n\n # Fetch from Google Translate\n elif locale.google_translate_code:\n gt_response = get_google_translate_data(text=source, locale=locale)\n if gt_response[\"status\"]:\n return gt_response[\"translation\"], \"gt\"\n\n return None, None\n\n\ndef update_changed_instances(tr_filter, tr_dict, translations):\n \"\"\"\n Update the latest activity and stats for changed Locales, ProjectLocales\n & TranslatedResources\n \"\"\"\n tr_filter = tuple(tr_filter)\n # Combine all generated filters with an OK operator.\n # `operator.ior` is the '|' Python operator, which turns into a logical OR\n # when used between django ORM query objects.\n tr_query = reduce(operator.ior, tr_filter)\n\n 
translatedresources = TranslatedResource.objects.filter(tr_query).annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n\n translatedresources.update_stats()\n\n for tr in translatedresources:\n index = tr_dict[tr.locale_resource]\n translation = translations[index]\n translation.update_latest_translation()\n", "path": "pontoon/pretranslation/pretranslate.py"}]} | 1,834 | 205 |
gh_patches_debug_7431 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3501 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow TinyMCE to be used in inline-mode
Inline mode of TinyMCE (https://www.tiny.cloud/docs/demo/inline) is attractive, among other reasons, because the edited text looks exactly like the saved result.
To enable it you can simply set `{"inline": "true"}` as the value of the "Other settings" field in the TinyMCE control panel; that alone is enough to trigger it.
But that fails with `Could not initialize inline editor on invalid inline target element`, since the HTML element that renders the TinyMCE editor is a `<textarea>`. See https://www.tiny.cloud/blog/tinymce-inline-option-not-working for details about that constraint.
The `<textarea>` comes from `plone.app.z3cform.widget.RichTextWidget`, which has `TextareaWidget` as its `_base`. In the widget's `render_input_mode` it is possible to conditionally render a different HTML element, such as a `div`, around the text. But then the fallback to a textarea no longer works, saving also fails, and so on...
So it seems that using inline mode is not straightforward. The question is whether we like that option enough to do the required work.
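If we do decide it is worth it, one way to get the control-panel value to the frontend is to expose it next to the other TinyMCE pattern options. The sketch below is illustrative only (it is not the current Plone implementation) and assumes the registry settings object exposes the flag as a boolean `inline` attribute:

```python
# Illustrative sketch: surface the control-panel "inline" flag in the TinyMCE
# pattern configuration so the widget markup can decide whether to render a
# <textarea> or an inline-editable element such as a <div>.
def with_inline_flag(configuration, settings):
    cfg = dict(configuration)  # leave the original mapping untouched
    cfg["inline"] = bool(getattr(settings, "inline", False))
    return cfg
```

The editor widget could then render a `div` instead of a `<textarea>` whenever `cfg["inline"]` is true, which is exactly the conditional rendering discussed above.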
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/patterns/settings.py`
Content:
```
1 from Acquisition import aq_inner
2 from Acquisition import aq_parent
3 from borg.localrole.interfaces import IFactoryTempFolder
4 from plone.app.content.browser.interfaces import IFolderContentsView
5 from plone.app.widgets.utils import get_relateditems_options
6 from plone.app.z3cform.utils import call_callables
7 from plone.registry.interfaces import IRegistry
8 from plone.uuid.interfaces import IUUID
9 from Products.CMFCore.interfaces._content import IFolderish
10 from plone.base.interfaces import ILinkSchema
11 from plone.base.interfaces import IPatternsSettings
12 from plone.base.interfaces import IPloneSiteRoot
13 from Products.CMFPlone.patterns.tinymce import TinyMCESettingsGenerator
14 from Products.CMFPlone.utils import get_portal
15 from zope.component import getUtility
16 from zope.i18n import translate
17 from zope.interface import implementer
18 from zope.schema.interfaces import IVocabularyFactory
19
20 import json
21
22
23 @implementer(IPatternsSettings)
24 class PatternSettingsAdapter:
25 """
26 Provides default plone settings relevant for patterns.
27 """
28
29 def __init__(self, context, request, field):
30 self.request = request
31 self.context = context
32 self.field = field
33
34 def __call__(self):
35 data = {}
36 data.update(self.mark_special_links())
37 data.update(self.structure_updater())
38 return data
39
40 def structure_updater(self):
41 """Generate the options for the structure updater pattern.
42 If we're not in folder contents view, do not expose these options.
43 """
44 data = {}
45 view = self.request.get("PUBLISHED", None)
46 if IFolderContentsView.providedBy(view):
47 data = {
48 "data-pat-structureupdater": json.dumps(
49 {
50 "titleSelector": ".documentFirstHeading",
51 "descriptionSelector": ".documentDescription",
52 }
53 )
54 }
55 return data
56
57 def mark_special_links(self):
58 result = {}
59
60 registry = getUtility(IRegistry)
61 settings = registry.forInterface(ILinkSchema, prefix="plone", check=False)
62
63 msl = settings.mark_special_links
64 elonw = settings.external_links_open_new_window
65 if msl or elonw:
66 result = {
67 "data-pat-markspeciallinks": json.dumps(
68 {"external_links_open_new_window": elonw, "mark_special_links": msl}
69 )
70 }
71 return result
72
73 @property
74 def image_scales(self):
75 factory = getUtility(IVocabularyFactory, "plone.app.vocabularies.ImagesScales")
76 vocabulary = factory(self.context)
77 ret = [{"title": translate(it.title), "value": it.value} for it in vocabulary]
78 ret = sorted(ret, key=lambda it: it["title"])
79 return json.dumps(ret)
80
81 def tinymce(self):
82 """
83 data-pat-tinymce : JSON.stringify({
84 relatedItems: {
85 vocabularyUrl: config.portal_url +
86 '/@@getVocabulary?name=plone.app.vocabularies.Catalog'
87 },
88 tiny: config,
89 prependToUrl: 'resolveuid/',
90 linkAttribute: 'UID',
91 prependToScalePart: '/@@images/image/'
92 })
93 """
94
95 generator = TinyMCESettingsGenerator(self.context, self.request)
96 settings = generator.settings
97 folder = aq_inner(self.context)
98
99 # Test if we are currently creating an Archetype object
100 if IFactoryTempFolder.providedBy(aq_parent(folder)):
101 folder = aq_parent(aq_parent(aq_parent(folder)))
102 if not IFolderish.providedBy(folder):
103 folder = aq_parent(folder)
104
105 if IPloneSiteRoot.providedBy(folder):
106 initial = None
107 else:
108 initial = IUUID(folder, None)
109
110 portal = get_portal()
111 portal_url = portal.absolute_url()
112 current_path = folder.absolute_url()[len(portal_url) :]
113
114 image_types = settings.image_objects or []
115
116 server_url = self.request.get("SERVER_URL", "")
117 site_path = portal_url[len(server_url) :]
118
119 related_items_config = get_relateditems_options(
120 context=self.context,
121 value=None,
122 separator=";",
123 vocabulary_name="plone.app.vocabularies.Catalog",
124 vocabulary_view="@@getVocabulary",
125 field_name=None,
126 )
127 related_items_config = call_callables(related_items_config, self.context)
128
129 configuration = {
130 "base_url": self.context.absolute_url(),
131 "imageTypes": image_types,
132 "imageScales": self.image_scales,
133 "linkAttribute": "UID",
134 # This is for loading the languages on tinymce
135 "loadingBaseUrl": "{}/++plone++static/components/tinymce-builded/"
136 "js/tinymce".format(portal_url),
137 "relatedItems": related_items_config,
138 "prependToScalePart": "/@@images/image/",
139 "prependToUrl": "{}/resolveuid/".format(site_path.rstrip("/")),
140 "tiny": generator.get_tiny_config(),
141 "upload": {
142 "baseUrl": portal_url,
143 "currentPath": current_path,
144 "initialFolder": initial,
145 "maxFiles": 1,
146 "relativePath": "@@fileUpload",
147 "showTitle": False,
148 "uploadMultiple": False,
149 },
150 }
151 return {"data-pat-tinymce": json.dumps(configuration)}
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/patterns/settings.py b/Products/CMFPlone/patterns/settings.py
--- a/Products/CMFPlone/patterns/settings.py
+++ b/Products/CMFPlone/patterns/settings.py
@@ -137,6 +137,7 @@
"relatedItems": related_items_config,
"prependToScalePart": "/@@images/image/",
"prependToUrl": "{}/resolveuid/".format(site_path.rstrip("/")),
+ "inline": settings.inline,
"tiny": generator.get_tiny_config(),
"upload": {
"baseUrl": portal_url,
| {"golden_diff": "diff --git a/Products/CMFPlone/patterns/settings.py b/Products/CMFPlone/patterns/settings.py\n--- a/Products/CMFPlone/patterns/settings.py\n+++ b/Products/CMFPlone/patterns/settings.py\n@@ -137,6 +137,7 @@\n \"relatedItems\": related_items_config,\n \"prependToScalePart\": \"/@@images/image/\",\n \"prependToUrl\": \"{}/resolveuid/\".format(site_path.rstrip(\"/\")),\n+ \"inline\": settings.inline,\n \"tiny\": generator.get_tiny_config(),\n \"upload\": {\n \"baseUrl\": portal_url,\n", "issue": "Allow TinyMCE to be used in inline-mode\nInline-mode of TinyMCE (https://www.tiny.cloud/docs/demo/inline) is nice among other reasons because the edited text looks exactly the way as the saved result. \r\n\r\nTo enable it you could simply set `{\"inline\": \"true\"}` as the value in the field \"Other settings\" of the TinyMCE controlpanel. That is enough to trigger it. \r\n\r\nBut that fails with `Could not initialize inline editor on invalid inline target element` since the html-tag that renders the TinyMCE is a `<textarea>`. See https://www.tiny.cloud/blog/tinymce-inline-option-not-working for details about that constraint.\r\n\r\nThe `<textarea>` thing is defined in `plone.app.z3cform.widget.RichTextWidget` which has `TextareaWidget` as `_base`. In `render_input_mode` of the widget it is possible to conditionally render a different html element like `div` around the text. But then fallback to textarea no longer works and saving also fails and and and... \r\n\r\nSo it seems that using inline-mode in not straightforward. The question is do we like that option enough to do the required work?\r\n\n", "before_files": [{"content": "from Acquisition import aq_inner\nfrom Acquisition import aq_parent\nfrom borg.localrole.interfaces import IFactoryTempFolder\nfrom plone.app.content.browser.interfaces import IFolderContentsView\nfrom plone.app.widgets.utils import get_relateditems_options\nfrom plone.app.z3cform.utils import call_callables\nfrom plone.registry.interfaces import IRegistry\nfrom plone.uuid.interfaces import IUUID\nfrom Products.CMFCore.interfaces._content import IFolderish\nfrom plone.base.interfaces import ILinkSchema\nfrom plone.base.interfaces import IPatternsSettings\nfrom plone.base.interfaces import IPloneSiteRoot\nfrom Products.CMFPlone.patterns.tinymce import TinyMCESettingsGenerator\nfrom Products.CMFPlone.utils import get_portal\nfrom zope.component import getUtility\nfrom zope.i18n import translate\nfrom zope.interface import implementer\nfrom zope.schema.interfaces import IVocabularyFactory\n\nimport json\n\n\n@implementer(IPatternsSettings)\nclass PatternSettingsAdapter:\n \"\"\"\n Provides default plone settings relevant for patterns.\n \"\"\"\n\n def __init__(self, context, request, field):\n self.request = request\n self.context = context\n self.field = field\n\n def __call__(self):\n data = {}\n data.update(self.mark_special_links())\n data.update(self.structure_updater())\n return data\n\n def structure_updater(self):\n \"\"\"Generate the options for the structure updater pattern.\n If we're not in folder contents view, do not expose these options.\n \"\"\"\n data = {}\n view = self.request.get(\"PUBLISHED\", None)\n if IFolderContentsView.providedBy(view):\n data = {\n \"data-pat-structureupdater\": json.dumps(\n {\n \"titleSelector\": \".documentFirstHeading\",\n \"descriptionSelector\": \".documentDescription\",\n }\n )\n }\n return data\n\n def mark_special_links(self):\n result = {}\n\n registry = getUtility(IRegistry)\n settings = 
registry.forInterface(ILinkSchema, prefix=\"plone\", check=False)\n\n msl = settings.mark_special_links\n elonw = settings.external_links_open_new_window\n if msl or elonw:\n result = {\n \"data-pat-markspeciallinks\": json.dumps(\n {\"external_links_open_new_window\": elonw, \"mark_special_links\": msl}\n )\n }\n return result\n\n @property\n def image_scales(self):\n factory = getUtility(IVocabularyFactory, \"plone.app.vocabularies.ImagesScales\")\n vocabulary = factory(self.context)\n ret = [{\"title\": translate(it.title), \"value\": it.value} for it in vocabulary]\n ret = sorted(ret, key=lambda it: it[\"title\"])\n return json.dumps(ret)\n\n def tinymce(self):\n \"\"\"\n data-pat-tinymce : JSON.stringify({\n relatedItems: {\n vocabularyUrl: config.portal_url +\n '/@@getVocabulary?name=plone.app.vocabularies.Catalog'\n },\n tiny: config,\n prependToUrl: 'resolveuid/',\n linkAttribute: 'UID',\n prependToScalePart: '/@@images/image/'\n })\n \"\"\"\n\n generator = TinyMCESettingsGenerator(self.context, self.request)\n settings = generator.settings\n folder = aq_inner(self.context)\n\n # Test if we are currently creating an Archetype object\n if IFactoryTempFolder.providedBy(aq_parent(folder)):\n folder = aq_parent(aq_parent(aq_parent(folder)))\n if not IFolderish.providedBy(folder):\n folder = aq_parent(folder)\n\n if IPloneSiteRoot.providedBy(folder):\n initial = None\n else:\n initial = IUUID(folder, None)\n\n portal = get_portal()\n portal_url = portal.absolute_url()\n current_path = folder.absolute_url()[len(portal_url) :]\n\n image_types = settings.image_objects or []\n\n server_url = self.request.get(\"SERVER_URL\", \"\")\n site_path = portal_url[len(server_url) :]\n\n related_items_config = get_relateditems_options(\n context=self.context,\n value=None,\n separator=\";\",\n vocabulary_name=\"plone.app.vocabularies.Catalog\",\n vocabulary_view=\"@@getVocabulary\",\n field_name=None,\n )\n related_items_config = call_callables(related_items_config, self.context)\n\n configuration = {\n \"base_url\": self.context.absolute_url(),\n \"imageTypes\": image_types,\n \"imageScales\": self.image_scales,\n \"linkAttribute\": \"UID\",\n # This is for loading the languages on tinymce\n \"loadingBaseUrl\": \"{}/++plone++static/components/tinymce-builded/\"\n \"js/tinymce\".format(portal_url),\n \"relatedItems\": related_items_config,\n \"prependToScalePart\": \"/@@images/image/\",\n \"prependToUrl\": \"{}/resolveuid/\".format(site_path.rstrip(\"/\")),\n \"tiny\": generator.get_tiny_config(),\n \"upload\": {\n \"baseUrl\": portal_url,\n \"currentPath\": current_path,\n \"initialFolder\": initial,\n \"maxFiles\": 1,\n \"relativePath\": \"@@fileUpload\",\n \"showTitle\": False,\n \"uploadMultiple\": False,\n },\n }\n return {\"data-pat-tinymce\": json.dumps(configuration)}\n", "path": "Products/CMFPlone/patterns/settings.py"}], "after_files": [{"content": "from Acquisition import aq_inner\nfrom Acquisition import aq_parent\nfrom borg.localrole.interfaces import IFactoryTempFolder\nfrom plone.app.content.browser.interfaces import IFolderContentsView\nfrom plone.app.widgets.utils import get_relateditems_options\nfrom plone.app.z3cform.utils import call_callables\nfrom plone.registry.interfaces import IRegistry\nfrom plone.uuid.interfaces import IUUID\nfrom Products.CMFCore.interfaces._content import IFolderish\nfrom plone.base.interfaces import ILinkSchema\nfrom plone.base.interfaces import IPatternsSettings\nfrom plone.base.interfaces import IPloneSiteRoot\nfrom Products.CMFPlone.patterns.tinymce 
import TinyMCESettingsGenerator\nfrom Products.CMFPlone.utils import get_portal\nfrom zope.component import getUtility\nfrom zope.i18n import translate\nfrom zope.interface import implementer\nfrom zope.schema.interfaces import IVocabularyFactory\n\nimport json\n\n\n@implementer(IPatternsSettings)\nclass PatternSettingsAdapter:\n \"\"\"\n Provides default plone settings relevant for patterns.\n \"\"\"\n\n def __init__(self, context, request, field):\n self.request = request\n self.context = context\n self.field = field\n\n def __call__(self):\n data = {}\n data.update(self.mark_special_links())\n data.update(self.structure_updater())\n return data\n\n def structure_updater(self):\n \"\"\"Generate the options for the structure updater pattern.\n If we're not in folder contents view, do not expose these options.\n \"\"\"\n data = {}\n view = self.request.get(\"PUBLISHED\", None)\n if IFolderContentsView.providedBy(view):\n data = {\n \"data-pat-structureupdater\": json.dumps(\n {\n \"titleSelector\": \".documentFirstHeading\",\n \"descriptionSelector\": \".documentDescription\",\n }\n )\n }\n return data\n\n def mark_special_links(self):\n result = {}\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(ILinkSchema, prefix=\"plone\", check=False)\n\n msl = settings.mark_special_links\n elonw = settings.external_links_open_new_window\n if msl or elonw:\n result = {\n \"data-pat-markspeciallinks\": json.dumps(\n {\"external_links_open_new_window\": elonw, \"mark_special_links\": msl}\n )\n }\n return result\n\n @property\n def image_scales(self):\n factory = getUtility(IVocabularyFactory, \"plone.app.vocabularies.ImagesScales\")\n vocabulary = factory(self.context)\n ret = [{\"title\": translate(it.title), \"value\": it.value} for it in vocabulary]\n ret = sorted(ret, key=lambda it: it[\"title\"])\n return json.dumps(ret)\n\n def tinymce(self):\n \"\"\"\n data-pat-tinymce : JSON.stringify({\n relatedItems: {\n vocabularyUrl: config.portal_url +\n '/@@getVocabulary?name=plone.app.vocabularies.Catalog'\n },\n tiny: config,\n prependToUrl: 'resolveuid/',\n linkAttribute: 'UID',\n prependToScalePart: '/@@images/image/'\n })\n \"\"\"\n\n generator = TinyMCESettingsGenerator(self.context, self.request)\n settings = generator.settings\n folder = aq_inner(self.context)\n\n # Test if we are currently creating an Archetype object\n if IFactoryTempFolder.providedBy(aq_parent(folder)):\n folder = aq_parent(aq_parent(aq_parent(folder)))\n if not IFolderish.providedBy(folder):\n folder = aq_parent(folder)\n\n if IPloneSiteRoot.providedBy(folder):\n initial = None\n else:\n initial = IUUID(folder, None)\n\n portal = get_portal()\n portal_url = portal.absolute_url()\n current_path = folder.absolute_url()[len(portal_url) :]\n\n image_types = settings.image_objects or []\n\n server_url = self.request.get(\"SERVER_URL\", \"\")\n site_path = portal_url[len(server_url) :]\n\n related_items_config = get_relateditems_options(\n context=self.context,\n value=None,\n separator=\";\",\n vocabulary_name=\"plone.app.vocabularies.Catalog\",\n vocabulary_view=\"@@getVocabulary\",\n field_name=None,\n )\n related_items_config = call_callables(related_items_config, self.context)\n\n configuration = {\n \"base_url\": self.context.absolute_url(),\n \"imageTypes\": image_types,\n \"imageScales\": self.image_scales,\n \"linkAttribute\": \"UID\",\n # This is for loading the languages on tinymce\n \"loadingBaseUrl\": \"{}/++plone++static/components/tinymce-builded/\"\n \"js/tinymce\".format(portal_url),\n 
\"relatedItems\": related_items_config,\n \"prependToScalePart\": \"/@@images/image/\",\n \"prependToUrl\": \"{}/resolveuid/\".format(site_path.rstrip(\"/\")),\n \"inline\": settings.inline,\n \"tiny\": generator.get_tiny_config(),\n \"upload\": {\n \"baseUrl\": portal_url,\n \"currentPath\": current_path,\n \"initialFolder\": initial,\n \"maxFiles\": 1,\n \"relativePath\": \"@@fileUpload\",\n \"showTitle\": False,\n \"uploadMultiple\": False,\n },\n }\n return {\"data-pat-tinymce\": json.dumps(configuration)}\n", "path": "Products/CMFPlone/patterns/settings.py"}]} | 2,013 | 143 |
gh_patches_debug_19528 | rasdani/github-patches | git_diff | akvo__akvo-rsr-4450 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Indicator target and disaggregation targets bugs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/serializers/indicator.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from akvo.rest.serializers.indicator_period import (
8 IndicatorPeriodFrameworkSerializer, IndicatorPeriodFrameworkLiteSerializer,
9 IndicatorPeriodFrameworkNotSoLiteSerializer, create_or_update_disaggregation_targets)
10 from akvo.rest.serializers.indicator_dimension_name import IndicatorDimensionNameSerializer
11 from akvo.rest.serializers.indicator_custom_field import IndicatorCustomValueSerializer
12 from akvo.rest.serializers.indicator_reference import IndicatorReferenceSerializer
13 from akvo.rest.serializers.rsr_serializer import BaseRSRSerializer
14 from akvo.rsr.models import (
15 Indicator, IndicatorDimensionName, IndicatorLabel, IndicatorDisaggregationTarget)
16
17 from rest_framework import serializers
18
19
20 def serialize_disaggregation_targets(indicator):
21 return [
22 {
23 'id': t.id,
24 'value': t.value,
25 'dimension_value': t.dimension_value_id,
26 'indicator': indicator.id,
27 }
28 for t in indicator.disaggregation_targets.all()
29 ]
30
31
32 class IndicatorDisaggregationTargetNestedSerializer(BaseRSRSerializer):
33 id = serializers.IntegerField()
34
35 class Meta:
36 model = IndicatorDisaggregationTarget
37 fields = ('id', 'value', 'dimension_value', 'indicator')
38 read_only_fields = ('id', 'indicator')
39
40 def to_internal_value(self, data):
41 if 'value' in data:
42 data['value'] = str(data['value']).replace(',', '.')
43 return super().to_internal_value(data)
44
45
46 class LabelListingField(serializers.RelatedField):
47
48 def to_representation(self, labels):
49 if isinstance(labels, IndicatorLabel):
50 value = labels.label_id
51 else:
52 value = list(labels.values_list('label_id', flat=True))
53 return value
54
55 def to_internal_value(self, org_label_ids):
56 indicator = self.root.instance
57 existing_labels = set(indicator.labels.values_list('label_id', flat=True))
58 new_labels = set(org_label_ids) - existing_labels
59 deleted_labels = existing_labels - set(org_label_ids)
60 labels = [IndicatorLabel(indicator=indicator, label_id=org_label_id) for org_label_id in new_labels]
61 IndicatorLabel.objects.bulk_create(labels)
62 if deleted_labels:
63 IndicatorLabel.objects.filter(label_id__in=deleted_labels).delete()
64
65 return indicator.labels.all()
66
67
68 class IndicatorSerializer(BaseRSRSerializer):
69
70 result_unicode = serializers.ReadOnlyField(source='result.__str__')
71 measure_label = serializers.ReadOnlyField(source='iati_measure_unicode')
72 children_aggregate_percentage = serializers.ReadOnlyField()
73 dimension_names = serializers.PrimaryKeyRelatedField(
74 many=True, queryset=IndicatorDimensionName.objects.all())
75 disaggregation_targets = serializers.SerializerMethodField()
76
77 def get_disaggregation_targets(self, obj):
78 return serialize_disaggregation_targets(obj)
79
80 class Meta:
81 model = Indicator
82 exclude = ['enumerators']
83
84 # TODO: add validation for parent_indicator
85
86
87 class IndicatorFrameworkSerializer(BaseRSRSerializer):
88
89 periods = IndicatorPeriodFrameworkSerializer(many=True, required=False, read_only=True)
90 parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')
91 children_aggregate_percentage = serializers.ReadOnlyField()
92 dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)
93 labels = LabelListingField(queryset=IndicatorLabel.objects.all(), required=False)
94 disaggregation_targets = IndicatorDisaggregationTargetNestedSerializer(many=True, required=False)
95
96 class Meta:
97 model = Indicator
98 exclude = ['enumerators']
99
100 def update(self, instance, validated_data):
101 disaggregation_targets = validated_data.pop('disaggregation_targets', [])
102 instance = super().update(instance, validated_data)
103 create_or_update_disaggregation_targets(instance, disaggregation_targets)
104 return instance
105
106 def validate_disaggregation_targets(self, data):
107 for target in data:
108 if 'value' not in target:
109 raise serializers.ValidationError('Disaggregation targets should have a value')
110 if 'dimension_value' not in target:
111 raise serializers.ValidationError(
112 'Disaggregation targets should have "dimension_value"')
113 return data
114
115 def to_internal_value(self, data):
116 if 'target_value' in data:
117 data['target_value'] = str(data['target_value']).replace(',', '.')
118 return super().to_internal_value(data)
119
120
121 class IndicatorFrameworkLiteSerializer(BaseRSRSerializer):
122
123 periods = IndicatorPeriodFrameworkLiteSerializer(many=True, required=False, read_only=True)
124 references = IndicatorReferenceSerializer(many=True, required=False, read_only=True)
125 parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')
126 children_aggregate_percentage = serializers.ReadOnlyField()
127 dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)
128 labels = LabelListingField(read_only=True)
129 disaggregation_targets = serializers.SerializerMethodField()
130 custom_values = IndicatorCustomValueSerializer(many=True, required=False)
131
132 def get_disaggregation_targets(self, obj):
133 return serialize_disaggregation_targets(obj)
134
135 class Meta:
136 model = Indicator
137 exclude = ['enumerators']
138
139
140 class IndicatorFrameworkNotSoLiteSerializer(BaseRSRSerializer):
141
142 periods = IndicatorPeriodFrameworkNotSoLiteSerializer(many=True, required=False, read_only=True)
143 parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')
144 children_aggregate_percentage = serializers.ReadOnlyField()
145 labels = LabelListingField(read_only=True)
146 disaggregation_targets = serializers.SerializerMethodField()
147 dimension_names = serializers.SerializerMethodField()
148
149 def get_disaggregation_targets(self, obj):
150 return serialize_disaggregation_targets(obj)
151
152 def get_dimension_names(self, obj):
153 return [
154 {
155 'id': n.id,
156 'name': n.name,
157 'dimension_values': [{'id': v.id, 'value': v.value} for v in n.dimension_values.all()]
158 }
159 for n in obj.dimension_names.all()
160 ]
161
162 class Meta:
163 model = Indicator
164 fields = (
165 'id',
166 'periods',
167 'parent_indicator',
168 'children_aggregate_percentage',
169 'labels',
170 'title',
171 'type',
172 'measure',
173 'ascending',
174 'description',
175 'baseline_year',
176 'baseline_value',
177 'baseline_comment',
178 'order',
179 'export_to_iati',
180 'result',
181 'disaggregation_targets',
182 'dimension_names',
183 'scores',
184 )
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/serializers/indicator.py b/akvo/rest/serializers/indicator.py
--- a/akvo/rest/serializers/indicator.py
+++ b/akvo/rest/serializers/indicator.py
@@ -37,8 +37,8 @@
read_only_fields = ('id', 'indicator')
def to_internal_value(self, data):
- if 'value' in data:
- data['value'] = str(data['value']).replace(',', '.')
+ value = data.get('value', None)
+ data['value'] = str(value).replace(',', '.') if value is not None else None
return super().to_internal_value(data)
@@ -112,8 +112,10 @@
return data
def to_internal_value(self, data):
- if 'target_value' in data:
+ if 'target_value' in data and data['target_value'] is not None:
data['target_value'] = str(data['target_value']).replace(',', '.')
+ if 'disaggregation_targets' in data:
+ data['disaggregation_targets'] = [dt for dt in data['disaggregation_targets'] if dt]
return super().to_internal_value(data)
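To see why the `None` guards matter, here is a minimal illustration of the comma-to-dot coercion the serializer performs, before and after the change (behaviour inferred from the diff above, not code taken from the repository):

```python
# Decimal-comma normalisation as done in to_internal_value(), old vs. new.
def old_normalise(value):
    return str(value).replace(",", ".")  # None becomes the string "None"

def new_normalise(value):
    return str(value).replace(",", ".") if value is not None else None

print(old_normalise("1,5"), new_normalise("1,5"))  # prints "1.5 1.5"
print(old_normalise(None), new_normalise(None))    # prints "None None": the old path yields the string "None", the new path a real None
```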
| {"golden_diff": "diff --git a/akvo/rest/serializers/indicator.py b/akvo/rest/serializers/indicator.py\n--- a/akvo/rest/serializers/indicator.py\n+++ b/akvo/rest/serializers/indicator.py\n@@ -37,8 +37,8 @@\n read_only_fields = ('id', 'indicator')\n \n def to_internal_value(self, data):\n- if 'value' in data:\n- data['value'] = str(data['value']).replace(',', '.')\n+ value = data.get('value', None)\n+ data['value'] = str(value).replace(',', '.') if value is not None else None\n return super().to_internal_value(data)\n \n \n@@ -112,8 +112,10 @@\n return data\n \n def to_internal_value(self, data):\n- if 'target_value' in data:\n+ if 'target_value' in data and data['target_value'] is not None:\n data['target_value'] = str(data['target_value']).replace(',', '.')\n+ if 'disaggregation_targets' in data:\n+ data['disaggregation_targets'] = [dt for dt in data['disaggregation_targets'] if dt]\n return super().to_internal_value(data)\n", "issue": "Indicator target and disaggregation targets bugs\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom akvo.rest.serializers.indicator_period import (\n IndicatorPeriodFrameworkSerializer, IndicatorPeriodFrameworkLiteSerializer,\n IndicatorPeriodFrameworkNotSoLiteSerializer, create_or_update_disaggregation_targets)\nfrom akvo.rest.serializers.indicator_dimension_name import IndicatorDimensionNameSerializer\nfrom akvo.rest.serializers.indicator_custom_field import IndicatorCustomValueSerializer\nfrom akvo.rest.serializers.indicator_reference import IndicatorReferenceSerializer\nfrom akvo.rest.serializers.rsr_serializer import BaseRSRSerializer\nfrom akvo.rsr.models import (\n Indicator, IndicatorDimensionName, IndicatorLabel, IndicatorDisaggregationTarget)\n\nfrom rest_framework import serializers\n\n\ndef serialize_disaggregation_targets(indicator):\n return [\n {\n 'id': t.id,\n 'value': t.value,\n 'dimension_value': t.dimension_value_id,\n 'indicator': indicator.id,\n }\n for t in indicator.disaggregation_targets.all()\n ]\n\n\nclass IndicatorDisaggregationTargetNestedSerializer(BaseRSRSerializer):\n id = serializers.IntegerField()\n\n class Meta:\n model = IndicatorDisaggregationTarget\n fields = ('id', 'value', 'dimension_value', 'indicator')\n read_only_fields = ('id', 'indicator')\n\n def to_internal_value(self, data):\n if 'value' in data:\n data['value'] = str(data['value']).replace(',', '.')\n return super().to_internal_value(data)\n\n\nclass LabelListingField(serializers.RelatedField):\n\n def to_representation(self, labels):\n if isinstance(labels, IndicatorLabel):\n value = labels.label_id\n else:\n value = list(labels.values_list('label_id', flat=True))\n return value\n\n def to_internal_value(self, org_label_ids):\n indicator = self.root.instance\n existing_labels = set(indicator.labels.values_list('label_id', flat=True))\n new_labels = set(org_label_ids) - existing_labels\n deleted_labels = existing_labels - set(org_label_ids)\n labels = [IndicatorLabel(indicator=indicator, label_id=org_label_id) for org_label_id in new_labels]\n IndicatorLabel.objects.bulk_create(labels)\n if deleted_labels:\n IndicatorLabel.objects.filter(label_id__in=deleted_labels).delete()\n\n return indicator.labels.all()\n\n\nclass IndicatorSerializer(BaseRSRSerializer):\n\n result_unicode = 
serializers.ReadOnlyField(source='result.__str__')\n measure_label = serializers.ReadOnlyField(source='iati_measure_unicode')\n children_aggregate_percentage = serializers.ReadOnlyField()\n dimension_names = serializers.PrimaryKeyRelatedField(\n many=True, queryset=IndicatorDimensionName.objects.all())\n disaggregation_targets = serializers.SerializerMethodField()\n\n def get_disaggregation_targets(self, obj):\n return serialize_disaggregation_targets(obj)\n\n class Meta:\n model = Indicator\n exclude = ['enumerators']\n\n # TODO: add validation for parent_indicator\n\n\nclass IndicatorFrameworkSerializer(BaseRSRSerializer):\n\n periods = IndicatorPeriodFrameworkSerializer(many=True, required=False, read_only=True)\n parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')\n children_aggregate_percentage = serializers.ReadOnlyField()\n dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)\n labels = LabelListingField(queryset=IndicatorLabel.objects.all(), required=False)\n disaggregation_targets = IndicatorDisaggregationTargetNestedSerializer(many=True, required=False)\n\n class Meta:\n model = Indicator\n exclude = ['enumerators']\n\n def update(self, instance, validated_data):\n disaggregation_targets = validated_data.pop('disaggregation_targets', [])\n instance = super().update(instance, validated_data)\n create_or_update_disaggregation_targets(instance, disaggregation_targets)\n return instance\n\n def validate_disaggregation_targets(self, data):\n for target in data:\n if 'value' not in target:\n raise serializers.ValidationError('Disaggregation targets should have a value')\n if 'dimension_value' not in target:\n raise serializers.ValidationError(\n 'Disaggregation targets should have \"dimension_value\"')\n return data\n\n def to_internal_value(self, data):\n if 'target_value' in data:\n data['target_value'] = str(data['target_value']).replace(',', '.')\n return super().to_internal_value(data)\n\n\nclass IndicatorFrameworkLiteSerializer(BaseRSRSerializer):\n\n periods = IndicatorPeriodFrameworkLiteSerializer(many=True, required=False, read_only=True)\n references = IndicatorReferenceSerializer(many=True, required=False, read_only=True)\n parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')\n children_aggregate_percentage = serializers.ReadOnlyField()\n dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)\n labels = LabelListingField(read_only=True)\n disaggregation_targets = serializers.SerializerMethodField()\n custom_values = IndicatorCustomValueSerializer(many=True, required=False)\n\n def get_disaggregation_targets(self, obj):\n return serialize_disaggregation_targets(obj)\n\n class Meta:\n model = Indicator\n exclude = ['enumerators']\n\n\nclass IndicatorFrameworkNotSoLiteSerializer(BaseRSRSerializer):\n\n periods = IndicatorPeriodFrameworkNotSoLiteSerializer(many=True, required=False, read_only=True)\n parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')\n children_aggregate_percentage = serializers.ReadOnlyField()\n labels = LabelListingField(read_only=True)\n disaggregation_targets = serializers.SerializerMethodField()\n dimension_names = serializers.SerializerMethodField()\n\n def get_disaggregation_targets(self, obj):\n return serialize_disaggregation_targets(obj)\n\n def get_dimension_names(self, obj):\n return [\n {\n 'id': n.id,\n 'name': n.name,\n 'dimension_values': [{'id': v.id, 'value': v.value} for v in 
n.dimension_values.all()]\n }\n for n in obj.dimension_names.all()\n ]\n\n class Meta:\n model = Indicator\n fields = (\n 'id',\n 'periods',\n 'parent_indicator',\n 'children_aggregate_percentage',\n 'labels',\n 'title',\n 'type',\n 'measure',\n 'ascending',\n 'description',\n 'baseline_year',\n 'baseline_value',\n 'baseline_comment',\n 'order',\n 'export_to_iati',\n 'result',\n 'disaggregation_targets',\n 'dimension_names',\n 'scores',\n )\n", "path": "akvo/rest/serializers/indicator.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom akvo.rest.serializers.indicator_period import (\n IndicatorPeriodFrameworkSerializer, IndicatorPeriodFrameworkLiteSerializer,\n IndicatorPeriodFrameworkNotSoLiteSerializer, create_or_update_disaggregation_targets)\nfrom akvo.rest.serializers.indicator_dimension_name import IndicatorDimensionNameSerializer\nfrom akvo.rest.serializers.indicator_custom_field import IndicatorCustomValueSerializer\nfrom akvo.rest.serializers.rsr_serializer import BaseRSRSerializer\nfrom akvo.rsr.models import (\n Indicator, IndicatorDimensionName, IndicatorLabel, IndicatorDisaggregationTarget)\n\nfrom rest_framework import serializers\n\n\ndef serialize_disaggregation_targets(indicator):\n return [\n {\n 'id': t.id,\n 'value': t.value,\n 'dimension_value': t.dimension_value_id,\n 'indicator': indicator.id,\n }\n for t in indicator.disaggregation_targets.all()\n ]\n\n\nclass IndicatorDisaggregationTargetNestedSerializer(BaseRSRSerializer):\n id = serializers.IntegerField()\n\n class Meta:\n model = IndicatorDisaggregationTarget\n fields = ('id', 'value', 'dimension_value', 'indicator')\n read_only_fields = ('id', 'indicator')\n\n def to_internal_value(self, data):\n value = data.get('value', None)\n data['value'] = str(value).replace(',', '.') if value is not None else None\n return super().to_internal_value(data)\n\n\nclass LabelListingField(serializers.RelatedField):\n\n def to_representation(self, labels):\n if isinstance(labels, IndicatorLabel):\n value = labels.label_id\n else:\n value = list(labels.values_list('label_id', flat=True))\n return value\n\n def to_internal_value(self, org_label_ids):\n indicator = self.root.instance\n existing_labels = set(indicator.labels.values_list('label_id', flat=True))\n new_labels = set(org_label_ids) - existing_labels\n deleted_labels = existing_labels - set(org_label_ids)\n labels = [IndicatorLabel(indicator=indicator, label_id=org_label_id) for org_label_id in new_labels]\n IndicatorLabel.objects.bulk_create(labels)\n if deleted_labels:\n IndicatorLabel.objects.filter(label_id__in=deleted_labels).delete()\n\n return indicator.labels.all()\n\n\nclass IndicatorSerializer(BaseRSRSerializer):\n\n result_unicode = serializers.ReadOnlyField(source='result.__str__')\n measure_label = serializers.ReadOnlyField(source='iati_measure_unicode')\n children_aggregate_percentage = serializers.ReadOnlyField()\n dimension_names = serializers.PrimaryKeyRelatedField(\n many=True, queryset=IndicatorDimensionName.objects.all())\n disaggregation_targets = serializers.SerializerMethodField()\n\n def get_disaggregation_targets(self, obj):\n return serialize_disaggregation_targets(obj)\n\n class Meta:\n model = Indicator\n exclude = ['enumerators']\n\n # TODO: add validation for 
parent_indicator\n\n\nclass IndicatorFrameworkSerializer(BaseRSRSerializer):\n\n periods = IndicatorPeriodFrameworkSerializer(many=True, required=False, read_only=True)\n parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')\n children_aggregate_percentage = serializers.ReadOnlyField()\n dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)\n labels = LabelListingField(queryset=IndicatorLabel.objects.all(), required=False)\n disaggregation_targets = IndicatorDisaggregationTargetNestedSerializer(many=True, required=False)\n\n class Meta:\n model = Indicator\n exclude = ['enumerators']\n\n def update(self, instance, validated_data):\n disaggregation_targets = validated_data.pop('disaggregation_targets', [])\n instance = super().update(instance, validated_data)\n create_or_update_disaggregation_targets(instance, disaggregation_targets)\n return instance\n\n def validate_disaggregation_targets(self, data):\n for target in data:\n if 'value' not in target:\n raise serializers.ValidationError('Disaggregation targets should have a value')\n if 'dimension_value' not in target:\n raise serializers.ValidationError(\n 'Disaggregation targets should have \"dimension_value\"')\n return data\n\n def to_internal_value(self, data):\n if 'target_value' in data and data['target_value'] is not None:\n data['target_value'] = str(data['target_value']).replace(',', '.')\n if 'disaggregation_targets' in data:\n data['disaggregation_targets'] = [dt for dt in data['disaggregation_targets'] if dt]\n return super().to_internal_value(data)\n\n\nclass IndicatorFrameworkLiteSerializer(BaseRSRSerializer):\n\n periods = IndicatorPeriodFrameworkLiteSerializer(many=True, required=False, read_only=True)\n parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')\n children_aggregate_percentage = serializers.ReadOnlyField()\n dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)\n labels = LabelListingField(read_only=True)\n disaggregation_targets = serializers.SerializerMethodField()\n custom_values = IndicatorCustomValueSerializer(many=True, required=False)\n\n def get_disaggregation_targets(self, obj):\n return serialize_disaggregation_targets(obj)\n\n class Meta:\n model = Indicator\n exclude = ['enumerators']\n\n\nclass IndicatorFrameworkNotSoLiteSerializer(BaseRSRSerializer):\n\n periods = IndicatorPeriodFrameworkNotSoLiteSerializer(many=True, required=False, read_only=True)\n parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')\n children_aggregate_percentage = serializers.ReadOnlyField()\n labels = LabelListingField(read_only=True)\n disaggregation_targets = serializers.SerializerMethodField()\n dimension_names = serializers.SerializerMethodField()\n\n def get_disaggregation_targets(self, obj):\n return serialize_disaggregation_targets(obj)\n\n def get_dimension_names(self, obj):\n return [\n {\n 'id': n.id,\n 'name': n.name,\n 'dimension_values': [{'id': v.id, 'value': v.value} for v in n.dimension_values.all()]\n }\n for n in obj.dimension_names.all()\n ]\n\n class Meta:\n model = Indicator\n fields = (\n 'id',\n 'periods',\n 'parent_indicator',\n 'children_aggregate_percentage',\n 'labels',\n 'title',\n 'type',\n 'measure',\n 'ascending',\n 'description',\n 'baseline_year',\n 'baseline_value',\n 'baseline_comment',\n 'order',\n 'export_to_iati',\n 'result',\n 'disaggregation_targets',\n 'dimension_names',\n 'scores',\n )\n", "path": "akvo/rest/serializers/indicator.py"}]} | 
2,137 | 272 |
gh_patches_debug_1148 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-19 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix some problems in the CTC beam search decoder
- [x] Make characters' indices in the FST start from one; otherwise wrong decoding results are produced, especially when space is the first character in the vocabulary;
- [x] Add a version check in the setup script;
- [x] Remove unused code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `decoders/swig/setup.py`
Content:
```
1 """Script to build and install decoder package."""
2 from __future__ import absolute_import
3 from __future__ import division
4 from __future__ import print_function
5
6 from setuptools import setup, Extension, distutils
7 import glob
8 import platform
9 import os, sys
10 import multiprocessing.pool
11 import argparse
12
13 parser = argparse.ArgumentParser(description=__doc__)
14 parser.add_argument(
15 "--num_processes",
16 default=1,
17 type=int,
18 help="Number of cpu processes to build package. (default: %(default)d)")
19 args = parser.parse_known_args()
20
21 # reconstruct sys.argv to pass to setup below
22 sys.argv = [sys.argv[0]] + args[1]
23
24
25 # monkey-patch for parallel compilation
26 # See: https://stackoverflow.com/a/13176803
27 def parallelCCompile(self,
28 sources,
29 output_dir=None,
30 macros=None,
31 include_dirs=None,
32 debug=0,
33 extra_preargs=None,
34 extra_postargs=None,
35 depends=None):
36 # those lines are copied from distutils.ccompiler.CCompiler directly
37 macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
38 output_dir, macros, include_dirs, sources, depends, extra_postargs)
39 cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
40
41 # parallel code
42 def _single_compile(obj):
43 try:
44 src, ext = build[obj]
45 except KeyError:
46 return
47 self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
48
49 # convert to list, imap is evaluated on-demand
50 thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)
51 list(thread_pool.imap(_single_compile, objects))
52 return objects
53
54
55 def compile_test(header, library):
56 dummy_path = os.path.join(os.path.dirname(__file__), "dummy")
57 command = "bash -c \"g++ -include " + header \
58 + " -l" + library + " -x c++ - <<<'int main() {}' -o " \
59 + dummy_path + " >/dev/null 2>/dev/null && rm " \
60 + dummy_path + " 2>/dev/null\""
61 return os.system(command) == 0
62
63
64 # hack compile to support parallel compiling
65 distutils.ccompiler.CCompiler.compile = parallelCCompile
66
67 FILES = glob.glob('kenlm/util/*.cc') \
68 + glob.glob('kenlm/lm/*.cc') \
69 + glob.glob('kenlm/util/double-conversion/*.cc')
70
71 FILES += glob.glob('openfst-1.6.3/src/lib/*.cc')
72
73 FILES = [
74 fn for fn in FILES
75 if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(
76 'unittest.cc'))
77 ]
78
79 LIBS = ['stdc++']
80 if platform.system() != 'Darwin':
81 LIBS.append('rt')
82
83 ARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']
84
85 if compile_test('zlib.h', 'z'):
86 ARGS.append('-DHAVE_ZLIB')
87 LIBS.append('z')
88
89 if compile_test('bzlib.h', 'bz2'):
90 ARGS.append('-DHAVE_BZLIB')
91 LIBS.append('bz2')
92
93 if compile_test('lzma.h', 'lzma'):
94 ARGS.append('-DHAVE_XZLIB')
95 LIBS.append('lzma')
96
97 os.system('swig -python -c++ ./decoders.i')
98
99 decoders_module = [
100 Extension(
101 name='_swig_decoders',
102 sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),
103 language='c++',
104 include_dirs=[
105 '.',
106 'kenlm',
107 'openfst-1.6.3/src/include',
108 'ThreadPool',
109 ],
110 libraries=LIBS,
111 extra_compile_args=ARGS)
112 ]
113
114 setup(
115 name='swig_decoders',
116 version='1.0',
117 description="""CTC decoders""",
118 ext_modules=decoders_module,
119 py_modules=['swig_decoders'], )
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/decoders/swig/setup.py b/decoders/swig/setup.py
--- a/decoders/swig/setup.py
+++ b/decoders/swig/setup.py
@@ -113,7 +113,7 @@
setup(
name='swig_decoders',
- version='1.0',
+ version='1.1',
description="""CTC decoders""",
ext_modules=decoders_module,
py_modules=['swig_decoders'], )
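The version bump above is what makes the "add a version check" item actionable: code that wraps the decoders can refuse to run against a stale build. A hypothetical consumer-side check (the package name is real; the threshold and placement are illustrative, not taken from the repository):

```python
# Hypothetical guard for code that imports the swig_decoders package.
import pkg_resources

MIN_VERSION = "1.1"
installed = pkg_resources.get_distribution("swig_decoders").version
if pkg_resources.parse_version(installed) < pkg_resources.parse_version(MIN_VERSION):
    raise RuntimeError(
        "swig_decoders %s is too old (need >= %s); please rebuild the decoder package"
        % (installed, MIN_VERSION)
    )
```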
| {"golden_diff": "diff --git a/decoders/swig/setup.py b/decoders/swig/setup.py\n--- a/decoders/swig/setup.py\n+++ b/decoders/swig/setup.py\n@@ -113,7 +113,7 @@\n \n setup(\n name='swig_decoders',\n- version='1.0',\n+ version='1.1',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "issue": "Fix some problems in the ctc beam search decoder\n- [x] Make character's index in FST starting from one, otherwise wrong decoding results would be produced especially when space is the first character in the vocabulary;\r\n- [x] Add version check in the setup script;\r\n- [x] Remove unused code. \r\n\n", "before_files": [{"content": "\"\"\"Script to build and install decoder package.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, Extension, distutils\nimport glob\nimport platform\nimport os, sys\nimport multiprocessing.pool\nimport argparse\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\n \"--num_processes\",\n default=1,\n type=int,\n help=\"Number of cpu processes to build package. (default: %(default)d)\")\nargs = parser.parse_known_args()\n\n# reconstruct sys.argv to pass to setup below\nsys.argv = [sys.argv[0]] + args[1]\n\n\n# monkey-patch for parallel compilation\n# See: https://stackoverflow.com/a/13176803\ndef parallelCCompile(self,\n sources,\n output_dir=None,\n macros=None,\n include_dirs=None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n depends=None):\n # those lines are copied from distutils.ccompiler.CCompiler directly\n macros, objects, extra_postargs, pp_opts, build = self._setup_compile(\n output_dir, macros, include_dirs, sources, depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n\n # parallel code\n def _single_compile(obj):\n try:\n src, ext = build[obj]\n except KeyError:\n return\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n # convert to list, imap is evaluated on-demand\n thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)\n list(thread_pool.imap(_single_compile, objects))\n return objects\n\n\ndef compile_test(header, library):\n dummy_path = os.path.join(os.path.dirname(__file__), \"dummy\")\n command = \"bash -c \\\"g++ -include \" + header \\\n + \" -l\" + library + \" -x c++ - <<<'int main() {}' -o \" \\\n + dummy_path + \" >/dev/null 2>/dev/null && rm \" \\\n + dummy_path + \" 2>/dev/null\\\"\"\n return os.system(command) == 0\n\n\n# hack compile to support parallel compiling\ndistutils.ccompiler.CCompiler.compile = parallelCCompile\n\nFILES = glob.glob('kenlm/util/*.cc') \\\n + glob.glob('kenlm/lm/*.cc') \\\n + glob.glob('kenlm/util/double-conversion/*.cc')\n\nFILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n\nFILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n 'unittest.cc'))\n]\n\nLIBS = ['stdc++']\nif platform.system() != 'Darwin':\n LIBS.append('rt')\n\nARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']\n\nif compile_test('zlib.h', 'z'):\n ARGS.append('-DHAVE_ZLIB')\n LIBS.append('z')\n\nif compile_test('bzlib.h', 'bz2'):\n ARGS.append('-DHAVE_BZLIB')\n LIBS.append('bz2')\n\nif compile_test('lzma.h', 'lzma'):\n ARGS.append('-DHAVE_XZLIB')\n LIBS.append('lzma')\n\nos.system('swig -python -c++ ./decoders.i')\n\ndecoders_module = [\n Extension(\n name='_swig_decoders',\n sources=FILES + glob.glob('*.cxx') + 
glob.glob('*.cpp'),\n language='c++',\n include_dirs=[\n '.',\n 'kenlm',\n 'openfst-1.6.3/src/include',\n 'ThreadPool',\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n]\n\nsetup(\n name='swig_decoders',\n version='1.0',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "path": "decoders/swig/setup.py"}], "after_files": [{"content": "\"\"\"Script to build and install decoder package.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, Extension, distutils\nimport glob\nimport platform\nimport os, sys\nimport multiprocessing.pool\nimport argparse\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\n \"--num_processes\",\n default=1,\n type=int,\n help=\"Number of cpu processes to build package. (default: %(default)d)\")\nargs = parser.parse_known_args()\n\n# reconstruct sys.argv to pass to setup below\nsys.argv = [sys.argv[0]] + args[1]\n\n\n# monkey-patch for parallel compilation\n# See: https://stackoverflow.com/a/13176803\ndef parallelCCompile(self,\n sources,\n output_dir=None,\n macros=None,\n include_dirs=None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n depends=None):\n # those lines are copied from distutils.ccompiler.CCompiler directly\n macros, objects, extra_postargs, pp_opts, build = self._setup_compile(\n output_dir, macros, include_dirs, sources, depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n\n # parallel code\n def _single_compile(obj):\n try:\n src, ext = build[obj]\n except KeyError:\n return\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n # convert to list, imap is evaluated on-demand\n thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)\n list(thread_pool.imap(_single_compile, objects))\n return objects\n\n\ndef compile_test(header, library):\n dummy_path = os.path.join(os.path.dirname(__file__), \"dummy\")\n command = \"bash -c \\\"g++ -include \" + header \\\n + \" -l\" + library + \" -x c++ - <<<'int main() {}' -o \" \\\n + dummy_path + \" >/dev/null 2>/dev/null && rm \" \\\n + dummy_path + \" 2>/dev/null\\\"\"\n return os.system(command) == 0\n\n\n# hack compile to support parallel compiling\ndistutils.ccompiler.CCompiler.compile = parallelCCompile\n\nFILES = glob.glob('kenlm/util/*.cc') \\\n + glob.glob('kenlm/lm/*.cc') \\\n + glob.glob('kenlm/util/double-conversion/*.cc')\n\nFILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n\nFILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n 'unittest.cc'))\n]\n\nLIBS = ['stdc++']\nif platform.system() != 'Darwin':\n LIBS.append('rt')\n\nARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']\n\nif compile_test('zlib.h', 'z'):\n ARGS.append('-DHAVE_ZLIB')\n LIBS.append('z')\n\nif compile_test('bzlib.h', 'bz2'):\n ARGS.append('-DHAVE_BZLIB')\n LIBS.append('bz2')\n\nif compile_test('lzma.h', 'lzma'):\n ARGS.append('-DHAVE_XZLIB')\n LIBS.append('lzma')\n\nos.system('swig -python -c++ ./decoders.i')\n\ndecoders_module = [\n Extension(\n name='_swig_decoders',\n sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),\n language='c++',\n include_dirs=[\n '.',\n 'kenlm',\n 'openfst-1.6.3/src/include',\n 'ThreadPool',\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n]\n\nsetup(\n name='swig_decoders',\n version='1.1',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n 
py_modules=['swig_decoders'], )\n", "path": "decoders/swig/setup.py"}]} | 1,469 | 107 |
gh_patches_debug_12031 | rasdani/github-patches | git_diff | pyca__cryptography-2766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
x509.CertifcateBuilder().sign() fails with "Unknown OpenSSL error' when subject has bad country code
The x509.CertifcateBuilder().sign() function fails with an unknown OpenSSL error when the builder is provided with an invalid country code:
```
Traceback (most recent call last):
File "./demo.py", line 30, in <module>
ca_crt = builder.sign(private_key=ca_key, algorithm=hashes.SHA256(), backend=default_backend())
File "/usr/local/lib/python3.4/dist-packages/cryptography/x509/base.py", line 520, in sign
return backend.create_x509_certificate(self, private_key, algorithm)
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/multibackend.py", line 381, in create_x509_certificate
return b.create_x509_certificate(builder, private_key, algorithm)
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py", line 1402, in create_x509_certificate
x509_cert, _encode_name_gc(self, list(builder._subject_name))
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py", line 158, in _encode_name_gc
subject = _encode_name(backend, attributes)
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py", line 153, in _encode_name
backend.openssl_assert(res == 1)
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py", line 719, in openssl_assert
return binding._openssl_assert(self._lib, ok)
File "/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/bindings/openssl/binding.py", line 43, in _openssl_assert
errors
cryptography.exceptions.InternalError: Unknown OpenSSL error. Please file an issue at https://github.com/pyca/cryptography/issues with information on how to reproduce this. ([_OpenSSLError(code=218603671, lib=13, func=122, reason=151)])
```
The demo code below reproduces this error. Tested on Ubuntu 14.04.3 x64 w/ Python 3.4.3, cryptography 1.2.1, and libssl-dev 1.0.1f-1ubuntu2.16.
```
#!/usr/bin/env python3
import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
DUR_ONE_DAY = datetime.timedelta(1, 0, 0)
DUR_ONE_YEAR = datetime.timedelta(366, 0, 0)
ca_key = rsa.generate_private_key(65537, 4096, default_backend())
sub_attr = [x509.NameAttribute(x509.NameOID.COUNTRY_NAME, "InvalidCC")]
builder = x509.CertificateBuilder()
builder = builder.issuer_name(x509.Name(sub_attr))
builder = builder.subject_name(x509.Name(sub_attr))
builder = builder.not_valid_before(datetime.datetime.today() - DUR_ONE_DAY)
builder = builder.not_valid_after(datetime.datetime.today() + DUR_ONE_YEAR)
builder = builder.serial_number(3)
builder = builder.public_key(ca_key.public_key())
extensions = []
extensions.append(x509.BasicConstraints(ca=True, path_length=1))
for ext in extensions:
builder = builder.add_extension(ext, critical=True)
ca_crt = builder.sign(private_key=ca_key, algorithm=hashes.SHA256(), backend=default_backend())
# builder.sign() will fail with when CONTRY_NAME is invalid country code:
# cryptography.exceptions.InternalError: Unknown
# OpenSSL error. Please file an issue at
# https://github.com/pyca/cryptography/issues with information on
# how to reproduce this. ([_OpenSSLError(code=218603671, lib=13,
# func=122, reason=151)])
```
Might be nice to raise a more relevant error message. Or catch this even sooner when the NameAttribute is added.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/x509/name.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import six
8
9 from cryptography import utils
10 from cryptography.x509.oid import ObjectIdentifier
11
12
13 class NameAttribute(object):
14 def __init__(self, oid, value):
15 if not isinstance(oid, ObjectIdentifier):
16 raise TypeError(
17 "oid argument must be an ObjectIdentifier instance."
18 )
19
20 if not isinstance(value, six.text_type):
21 raise TypeError(
22 "value argument must be a text type."
23 )
24
25 self._oid = oid
26 self._value = value
27
28 oid = utils.read_only_property("_oid")
29 value = utils.read_only_property("_value")
30
31 def __eq__(self, other):
32 if not isinstance(other, NameAttribute):
33 return NotImplemented
34
35 return (
36 self.oid == other.oid and
37 self.value == other.value
38 )
39
40 def __ne__(self, other):
41 return not self == other
42
43 def __hash__(self):
44 return hash((self.oid, self.value))
45
46 def __repr__(self):
47 return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self)
48
49
50 class Name(object):
51 def __init__(self, attributes):
52 self._attributes = attributes
53
54 def get_attributes_for_oid(self, oid):
55 return [i for i in self if i.oid == oid]
56
57 def __eq__(self, other):
58 if not isinstance(other, Name):
59 return NotImplemented
60
61 return self._attributes == other._attributes
62
63 def __ne__(self, other):
64 return not self == other
65
66 def __hash__(self):
67 # TODO: this is relatively expensive, if this looks like a bottleneck
68 # for you, consider optimizing!
69 return hash(tuple(self._attributes))
70
71 def __iter__(self):
72 return iter(self._attributes)
73
74 def __len__(self):
75 return len(self._attributes)
76
77 def __repr__(self):
78 return "<Name({0!r})>".format(self._attributes)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/x509/name.py b/src/cryptography/x509/name.py
--- a/src/cryptography/x509/name.py
+++ b/src/cryptography/x509/name.py
@@ -7,7 +7,7 @@
import six
from cryptography import utils
-from cryptography.x509.oid import ObjectIdentifier
+from cryptography.x509.oid import NameOID, ObjectIdentifier
class NameAttribute(object):
@@ -22,6 +22,11 @@
"value argument must be a text type."
)
+ if oid == NameOID.COUNTRY_NAME and len(value.encode("utf8")) != 2:
+ raise ValueError(
+ "Country name must be a 2 character country code"
+ )
+
self._oid = oid
self._value = value
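
A minimal sketch of the behaviour the patch above introduces, assuming the `NameAttribute` constructor and the `NameOID` import shown in the files; the error message is the one added in the diff:

```python
from cryptography import x509
from cryptography.x509.oid import NameOID

# A two-character code passes the new length check in NameAttribute.__init__.
ok = x509.NameAttribute(NameOID.COUNTRY_NAME, u"US")

# The invalid code from the issue now fails fast with a clear ValueError
# instead of surfacing later as an opaque OpenSSL error inside sign().
try:
    x509.NameAttribute(NameOID.COUNTRY_NAME, u"InvalidCC")
except ValueError as exc:
    print(exc)  # Country name must be a 2 character country code
```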
| {"golden_diff": "diff --git a/src/cryptography/x509/name.py b/src/cryptography/x509/name.py\n--- a/src/cryptography/x509/name.py\n+++ b/src/cryptography/x509/name.py\n@@ -7,7 +7,7 @@\n import six\n \n from cryptography import utils\n-from cryptography.x509.oid import ObjectIdentifier\n+from cryptography.x509.oid import NameOID, ObjectIdentifier\n \n \n class NameAttribute(object):\n@@ -22,6 +22,11 @@\n \"value argument must be a text type.\"\n )\n \n+ if oid == NameOID.COUNTRY_NAME and len(value.encode(\"utf8\")) != 2:\n+ raise ValueError(\n+ \"Country name must be a 2 character country code\"\n+ )\n+\n self._oid = oid\n self._value = value\n", "issue": "x509.CertifcateBuilder().sign() fails with \"Unknown OpenSSL error' when subject has bad country code\nThe x509.CertifcateBuilder().sign() function fails with an unknown OpenSSL error when the builder is provided with an invalid country code:\n\n```\nTraceback (most recent call last):\n File \"./demo.py\", line 30, in <module>\n ca_crt = builder.sign(private_key=ca_key, algorithm=hashes.SHA256(), backend=default_backend())\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/x509/base.py\", line 520, in sign\n return backend.create_x509_certificate(self, private_key, algorithm)\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/multibackend.py\", line 381, in create_x509_certificate\n return b.create_x509_certificate(builder, private_key, algorithm)\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py\", line 1402, in create_x509_certificate\n x509_cert, _encode_name_gc(self, list(builder._subject_name))\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py\", line 158, in _encode_name_gc\n subject = _encode_name(backend, attributes)\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py\", line 153, in _encode_name\n backend.openssl_assert(res == 1)\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/backends/openssl/backend.py\", line 719, in openssl_assert\n return binding._openssl_assert(self._lib, ok)\n File \"/usr/local/lib/python3.4/dist-packages/cryptography/hazmat/bindings/openssl/binding.py\", line 43, in _openssl_assert\n errors\ncryptography.exceptions.InternalError: Unknown OpenSSL error. Please file an issue at https://github.com/pyca/cryptography/issues with information on how to reproduce this. ([_OpenSSLError(code=218603671, lib=13, func=122, reason=151)])\n```\n\nThe demo code below reproduces this error. 
Tested on Ubuntu 14.04.3 x64 w/ Python 3.4.3, cryptography 1.2.1, and libssl-dev 1.0.1f-1ubuntu2.16.\n\n```\n#!/usr/bin/env python3\n\nimport datetime\n\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import rsa\n\nDUR_ONE_DAY = datetime.timedelta(1, 0, 0)\nDUR_ONE_YEAR = datetime.timedelta(366, 0, 0)\n\nca_key = rsa.generate_private_key(65537, 4096, default_backend())\n\nsub_attr = [x509.NameAttribute(x509.NameOID.COUNTRY_NAME, \"InvalidCC\")]\n\nbuilder = x509.CertificateBuilder()\nbuilder = builder.issuer_name(x509.Name(sub_attr))\nbuilder = builder.subject_name(x509.Name(sub_attr))\nbuilder = builder.not_valid_before(datetime.datetime.today() - DUR_ONE_DAY)\nbuilder = builder.not_valid_after(datetime.datetime.today() + DUR_ONE_YEAR)\nbuilder = builder.serial_number(3)\nbuilder = builder.public_key(ca_key.public_key())\n\nextensions = []\nextensions.append(x509.BasicConstraints(ca=True, path_length=1))\nfor ext in extensions:\n builder = builder.add_extension(ext, critical=True)\n\nca_crt = builder.sign(private_key=ca_key, algorithm=hashes.SHA256(), backend=default_backend())\n\n# builder.sign() will fail with when CONTRY_NAME is invalid country code:\n# cryptography.exceptions.InternalError: Unknown\n# OpenSSL error. Please file an issue at\n# https://github.com/pyca/cryptography/issues with information on\n# how to reproduce this. ([_OpenSSLError(code=218603671, lib=13,\n# func=122, reason=151)])\n```\n\nMight be nice to raise a more relevant error message. Or catch this even sooner when the NameAttribute is added. \n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.x509.oid import ObjectIdentifier\n\n\nclass NameAttribute(object):\n def __init__(self, oid, value):\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n\n if not isinstance(value, six.text_type):\n raise TypeError(\n \"value argument must be a text type.\"\n )\n\n self._oid = oid\n self._value = value\n\n oid = utils.read_only_property(\"_oid\")\n value = utils.read_only_property(\"_value\")\n\n def __eq__(self, other):\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return (\n self.oid == other.oid and\n self.value == other.value\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((self.oid, self.value))\n\n def __repr__(self):\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n\n\nclass Name(object):\n def __init__(self, attributes):\n self._attributes = attributes\n\n def get_attributes_for_oid(self, oid):\n return [i for i in self if i.oid == oid]\n\n def __eq__(self, other):\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n # TODO: this is relatively expensive, if this looks like a bottleneck\n # for you, consider optimizing!\n return hash(tuple(self._attributes))\n\n def __iter__(self):\n return iter(self._attributes)\n\n def __len__(self):\n return len(self._attributes)\n\n def __repr__(self):\n return \"<Name({0!r})>\".format(self._attributes)\n", "path": "src/cryptography/x509/name.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.x509.oid import NameOID, ObjectIdentifier\n\n\nclass NameAttribute(object):\n def __init__(self, oid, value):\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n\n if not isinstance(value, six.text_type):\n raise TypeError(\n \"value argument must be a text type.\"\n )\n\n if oid == NameOID.COUNTRY_NAME and len(value.encode(\"utf8\")) != 2:\n raise ValueError(\n \"Country name must be a 2 character country code\"\n )\n\n self._oid = oid\n self._value = value\n\n oid = utils.read_only_property(\"_oid\")\n value = utils.read_only_property(\"_value\")\n\n def __eq__(self, other):\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return (\n self.oid == other.oid and\n self.value == other.value\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((self.oid, self.value))\n\n def __repr__(self):\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n\n\nclass Name(object):\n def __init__(self, attributes):\n self._attributes = attributes\n\n def get_attributes_for_oid(self, oid):\n return [i for i in self if i.oid == oid]\n\n def __eq__(self, other):\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n # TODO: this is relatively expensive, if this looks like a bottleneck\n # for you, consider optimizing!\n return hash(tuple(self._attributes))\n\n def __iter__(self):\n return iter(self._attributes)\n\n def __len__(self):\n return len(self._attributes)\n\n def __repr__(self):\n return \"<Name({0!r})>\".format(self._attributes)\n", "path": "src/cryptography/x509/name.py"}]} | 1,878 | 185 |
gh_patches_debug_14216 | rasdani/github-patches | git_diff | facebookresearch__fairseq-4623 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Uninitialised bias parameters in RelPositionMultiHeadedAttention
## 🐛 Bug
The (u, v) bias terms in the MHA layer with relative position encodings are defined using `torch.Tensor`s:
https://github.com/facebookresearch/fairseq/blob/4fe8583396191c22011350248119db98ec1b5cb8/fairseq/modules/espnet_multihead_attention.py#L127-L128
Since `torch.Tensor` creates an un-initialised tensor, its use results in non-deterministic behaviour, poor initialisation, and nans if you have an unlucky init:
```python
>>>import torch
>>> torch.Tensor(1, 1).sum()
tensor(0.)
>>> torch.Tensor(10, 10).sum()
tensor(4.9312e-38)
>>> torch.Tensor(100, 100).sum()
tensor(nan)
```
Section 3.3 of the referenced paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/pdf/1901.02860.pdf) does not specify the initialisation for the bias terms. Since `torch.Tensor` is typically populated with zeros, setting the initialisation to `torch.zeros` should provide a fix that is close to the intended behaviour.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fairseq/modules/espnet_multihead_attention.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3
4 # Copyright 2019 Shigeki Karita
5 # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
6
7 """Multi-Head Attention layer definition."""
8
9 import math
10 import torch
11 from torch import nn
12 from fairseq.modules.rotary_positional_embedding import (
13 RotaryPositionalEmbedding,
14 apply_rotary_pos_emb,
15 )
16
17
18 class ESPNETMultiHeadedAttention(nn.Module):
19 """Multi-Head Attention layer.
20 Args:
21 n_head: The number of heads.
22 n_feat: The number of features.
23 dropout: Dropout rate.
24 """
25
26 def __init__(self, n_feat, n_head, dropout):
27 """Construct an MultiHeadedAttention object."""
28 super(ESPNETMultiHeadedAttention, self).__init__()
29 assert n_feat % n_head == 0
30 # We assume d_v always equals d_k
31 self.d_k = n_feat // n_head
32 self.h = n_head
33 self.linear_q = nn.Linear(n_feat, n_feat)
34 self.linear_k = nn.Linear(n_feat, n_feat)
35 self.linear_v = nn.Linear(n_feat, n_feat)
36 self.linear_out = nn.Linear(n_feat, n_feat)
37 self.attn = None
38 self.dropout = nn.Dropout(p=dropout)
39
40 def forward_qkv(self, query, key, value, **kwargs):
41 """Transform query, key and value.
42 Args:
43 query: Query tensor B X T1 X C
44 key: Key tensor B X T2 X C
45 value: Value tensor B X T2 X C
46 Returns:
47 torch.Tensor: Transformed query tensor B X n_head X T1 X d_k
48 torch.Tensor: Transformed key tensor B X n_head X T2 X d_k
49 torch.Tensor: Transformed value tensor B X n_head X T2 X d_k
50 """
51 n_batch = query.size(0)
52 q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
53 k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
54 v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
55 q = q.transpose(1, 2) # (batch, head, time1, d_k)
56 k = k.transpose(1, 2) # (batch, head, time2, d_k)
57 v = v.transpose(1, 2) # (batch, head, time2, d_k)
58 return q, k, v
59
60 def forward_attention(self, value, scores, mask):
61 """Compute attention context vector.
62 Args:
63 value: Transformed value B X n_head X T2 X d_k.
64 scores: Attention score B X n_head X T1 X T2
65 mask: Mask T2 X B
66 Returns:
67 torch.Tensor: Transformed value B X T1 X d_model
68 weighted by the attention score B X T1 X T2
69 """
70 n_batch = value.size(0)
71 if mask is not None:
72 scores = scores.masked_fill(
73 mask.unsqueeze(1).unsqueeze(2).to(bool),
74 float("-inf"), # (batch, head, time1, time2)
75 )
76 self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
77
78 else:
79 self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
80 p_attn = self.dropout(self.attn)
81 x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
82 x = (
83 x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
84 ) # (batch, time1, d_model)
85
86 return self.linear_out(x) # (batch, time1, d_model)
87
88 def forward(self, query, key, value, key_padding_mask=None, **kwargs):
89 """Compute scaled dot product attention.
90 Args:
91 query (torch.Tensor): Query tensor T X B X C
92 key (torch.Tensor): Key tensor T X B X C
93 value (torch.Tensor): Value tensor T X B X C
94 mask (torch.Tensor): Mask tensor T X B
95 Returns:
96 torch.Tensor: Output tensor T X B X D.
97 """
98 query = query.transpose(0, 1)
99 key = key.transpose(0, 1)
100 value = value.transpose(0, 1)
101
102 q, k, v = self.forward_qkv(query, key, value)
103 scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
104 scores = self.forward_attention(v, scores, key_padding_mask)
105 scores = scores.transpose(0, 1)
106 return scores, None
107
108
109 class RelPositionMultiHeadedAttention(ESPNETMultiHeadedAttention):
110 """Multi-Head Attention layer with relative position encoding.
111 Paper: https://arxiv.org/abs/1901.02860
112 Args:
113 n_head: The number of heads.
114 n_feat: The number of features.
115 dropout: Dropout rate.
116 zero_triu: Whether to zero the upper triangular part of attention matrix.
117 """
118
119 def __init__(self, n_feat, n_head, dropout, zero_triu=False):
120 """Construct an RelPositionMultiHeadedAttention object."""
121 super().__init__(n_feat, n_head, dropout)
122 self.zero_triu = zero_triu
123 # linear transformation for positional encoding
124 self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
125 # these two learnable bias are used in matrix c and matrix d
126 # as described in https://arxiv.org/abs/1901.02860 Section 3.3
127 self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
128 self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
129 torch.nn.init.xavier_uniform_(self.pos_bias_u)
130 torch.nn.init.xavier_uniform_(self.pos_bias_v)
131
132 def rel_shift(self, x):
133 """Compute relative positional encoding.
134 Args:
135 x: Input tensor B X n_head X T X 2T-1
136 Returns:
137 torch.Tensor: Output tensor.
138 """
139 zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
140 x_padded = torch.cat([zero_pad, x], dim=-1)
141
142 x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
143 x = x_padded[:, :, 1:].view_as(x)[
144 :, :, :, : x.size(-1) // 2 + 1
145 ] # only keep the positions from 0 to time2
146
147 if self.zero_triu:
148 ones = torch.ones((x.size(2), x.size(3)), device=x.device)
149 x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
150
151 return x
152
153 def forward(self, query, key, value, pos_emb, key_padding_mask=None, **kwargs):
154 """Compute scaled dot product attention.
155 Args:
156 query: Query tensor T X B X C
157 key: Key tensor T X B X C
158 value: Value tensor T X B X C
159 pos_emb: Positional embedding tensor B X 2T-1 X C
160 key_padding_mask: Mask tensor T X B
161 Returns:
162 torch.Tensor: Output tensor T X B X C.
163 """
164 query = query.transpose(0, 1)
165 key = key.transpose(0, 1)
166 value = value.transpose(0, 1)
167 pos_emb = pos_emb.transpose(0, 1)
168 q, k, v = self.forward_qkv(query, key, value)
169 q = q.transpose(1, 2) # (batch, time1, head, d_k)
170 n_batch_pos = pos_emb.size(0)
171 p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
172 p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)
173
174 # (batch, head, time1, d_k)
175 q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
176 # (batch, head, time1, d_k)
177 q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
178
179 # compute attention score
180 # first compute matrix a and matrix c
181 # as described in https://arxiv.org/abs/1901.02860 Section 3.3
182 # (batch, head, time1, time2)
183 matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
184
185 # compute matrix b and matrix d
186 # (batch, head, time1, 2*time1-1)
187 matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
188 matrix_bd = self.rel_shift(matrix_bd)
189
190 scores = (matrix_ac + matrix_bd) / math.sqrt(
191 self.d_k
192 ) # (batch, head, time1, time2)
193
194 scores = self.forward_attention(v, scores, key_padding_mask)
195 scores = scores.transpose(0, 1)
196 return scores, None
197
198
199 class RotaryPositionMultiHeadedAttention(ESPNETMultiHeadedAttention):
200 def __init__(
201 self,
202 n_feat,
203 n_head,
204 dropout,
205 precision,
206 rotary_emd_base=10000,
207 ):
208 """Construct an RotaryPositionMultiHeadedAttention object."""
209 super().__init__(n_feat, n_head, dropout)
210 precision = torch.float
211 self.rotary_ndims = self.d_k # also try self.d_k//2
212 if precision == "fp16":
213 precision = torch.half
214
215 self.rotary_emb = RotaryPositionalEmbedding(
216 self.rotary_ndims, base=rotary_emd_base, precision=precision
217 )
218
219 def forward(self, query, key, value, key_padding_mask=None, **kwargs):
220 """Compute rotary position attention.
221 Args:
222 query: Query tensor T X B X C
223 key: Key tensor T X B X C
224 value: Value tensor T X B X C
225 key_padding_mask: Mask tensor T X B
226 Returns:
227 torch.Tensor: Output tensor T X B X D.
228 Notes:
229 Assumes self attn
230 """
231
232 T, B, C = value.size()
233 query = query.view(T, B, self.h, self.d_k)
234 key = key.view(T, B, self.h, self.d_k)
235 value = value.view(T, B, self.h, self.d_k)
236 cos, sin = self.rotary_emb(value, seq_len=T)
237 query, key = apply_rotary_pos_emb(
238 query, key, cos, sin, offset=0
239 ) # offset is based on layer_past
240
241 query = query.view(T, B, self.h * self.d_k)
242 key = key.view(T, B, self.h * self.d_k)
243 value = value.view(T, B, self.h * self.d_k)
244
245 # TBD to BTD
246 query = query.transpose(0, 1)
247 key = key.transpose(0, 1)
248 value = value.transpose(0, 1)
249
250 q, k, v = self.forward_qkv(query, key, value)
251 scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
252 scores = self.forward_attention(v, scores, key_padding_mask)
253 scores = scores.transpose(0, 1)
254 return scores, None
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fairseq/modules/espnet_multihead_attention.py b/fairseq/modules/espnet_multihead_attention.py
--- a/fairseq/modules/espnet_multihead_attention.py
+++ b/fairseq/modules/espnet_multihead_attention.py
@@ -124,8 +124,8 @@
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
# these two learnable bias are used in matrix c and matrix d
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
- self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
- self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
+ self.pos_bias_u = nn.Parameter(torch.zeros(self.h, self.d_k))
+ self.pos_bias_v = nn.Parameter(torch.zeros(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
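
A minimal sketch of why the patched initialisation above is deterministic, assuming only `torch`: `torch.zeros` gives a well-defined starting buffer, and `xavier_uniform_` then overwrites it in place exactly as before:

```python
import torch

h, d_k = 4, 64

# Old pattern: the parameter wraps uninitialised memory, so its starting
# values (and anything reduced over them) are non-deterministic.
bad = torch.nn.Parameter(torch.Tensor(h, d_k))

# Patched pattern: zero-filled, then re-initialised with Xavier uniform.
good = torch.nn.Parameter(torch.zeros(h, d_k))
torch.nn.init.xavier_uniform_(good)
assert torch.isfinite(good).all()
```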
| {"golden_diff": "diff --git a/fairseq/modules/espnet_multihead_attention.py b/fairseq/modules/espnet_multihead_attention.py\n--- a/fairseq/modules/espnet_multihead_attention.py\n+++ b/fairseq/modules/espnet_multihead_attention.py\n@@ -124,8 +124,8 @@\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n- self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n- self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n+ self.pos_bias_u = nn.Parameter(torch.zeros(self.h, self.d_k))\n+ self.pos_bias_v = nn.Parameter(torch.zeros(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n", "issue": "Uninitialised bias parameters in RelPositionMultiHeadedAttention\n## \ud83d\udc1b Bug\r\n\r\nThe (u, v) bias terms in the MHA layer with relative position encodings are defined using `torch.Tensor`s:\r\nhttps://github.com/facebookresearch/fairseq/blob/4fe8583396191c22011350248119db98ec1b5cb8/fairseq/modules/espnet_multihead_attention.py#L127-L128\r\n\r\nSince `torch.Tensor` creates an un-initialised tensor, its use results in non-deterministic behaviour, poor initialisation, and nans if you have an unlucky init:\r\n```python\r\n>>>import torch\r\n>>> torch.Tensor(1, 1).sum()\r\ntensor(0.)\r\n>>> torch.Tensor(10, 10).sum()\r\ntensor(4.9312e-38)\r\n>>> torch.Tensor(100, 100).sum()\r\ntensor(nan)\r\n```\r\nSection 3.3 of the referenced paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/pdf/1901.02860.pdf) does not specify the initialisation for the bias terms. Since `torch.Tensor` is typically populated with zeros, setting the initialisation to `torch.zeros` should provide a fix that is close to the intended behaviour.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Shigeki Karita\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Multi-Head Attention layer definition.\"\"\"\n\nimport math\nimport torch\nfrom torch import nn\nfrom fairseq.modules.rotary_positional_embedding import (\n RotaryPositionalEmbedding,\n apply_rotary_pos_emb,\n)\n\n\nclass ESPNETMultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n Args:\n n_head: The number of heads.\n n_feat: The number of features.\n dropout: Dropout rate.\n \"\"\"\n\n def __init__(self, n_feat, n_head, dropout):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super(ESPNETMultiHeadedAttention, self).__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n self.linear_k = nn.Linear(n_feat, n_feat)\n self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n\n def forward_qkv(self, query, key, value, **kwargs):\n \"\"\"Transform query, key and value.\n Args:\n query: Query tensor B X T1 X C\n key: Key tensor B X T2 X C\n value: Value tensor B X T2 X C\n Returns:\n torch.Tensor: Transformed query tensor B X n_head X T1 X d_k\n torch.Tensor: Transformed key tensor B X n_head X T2 X d_k\n torch.Tensor: Transformed value tensor B X n_head X T2 X d_k\n \"\"\"\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = 
self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\n return q, k, v\n\n def forward_attention(self, value, scores, mask):\n \"\"\"Compute attention context vector.\n Args:\n value: Transformed value B X n_head X T2 X d_k.\n scores: Attention score B X n_head X T1 X T2\n mask: Mask T2 X B\n Returns:\n torch.Tensor: Transformed value B X T1 X d_model\n weighted by the attention score B X T1 X T2\n \"\"\"\n n_batch = value.size(0)\n if mask is not None:\n scores = scores.masked_fill(\n mask.unsqueeze(1).unsqueeze(2).to(bool),\n float(\"-inf\"), # (batch, head, time1, time2)\n )\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n else:\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n p_attn = self.dropout(self.attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(self, query, key, value, key_padding_mask=None, **kwargs):\n \"\"\"Compute scaled dot product attention.\n Args:\n query (torch.Tensor): Query tensor T X B X C\n key (torch.Tensor): Key tensor T X B X C\n value (torch.Tensor): Value tensor T X B X C\n mask (torch.Tensor): Mask tensor T X B\n Returns:\n torch.Tensor: Output tensor T X B X D.\n \"\"\"\n query = query.transpose(0, 1)\n key = key.transpose(0, 1)\n value = value.transpose(0, 1)\n\n q, k, v = self.forward_qkv(query, key, value)\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n scores = self.forward_attention(v, scores, key_padding_mask)\n scores = scores.transpose(0, 1)\n return scores, None\n\n\nclass RelPositionMultiHeadedAttention(ESPNETMultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding.\n Paper: https://arxiv.org/abs/1901.02860\n Args:\n n_head: The number of heads.\n n_feat: The number of features.\n dropout: Dropout rate.\n zero_triu: Whether to zero the upper triangular part of attention matrix.\n \"\"\"\n\n def __init__(self, n_feat, n_head, dropout, zero_triu=False):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_feat, n_head, dropout)\n self.zero_triu = zero_triu\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding.\n Args:\n x: Input tensor B X n_head X T X 2T-1\n Returns:\n torch.Tensor: Output tensor.\n \"\"\"\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)[\n :, :, :, : x.size(-1) // 2 + 1\n ] # only keep the positions from 0 to time2\n\n if self.zero_triu:\n ones = torch.ones((x.size(2), x.size(3)), device=x.device)\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def 
forward(self, query, key, value, pos_emb, key_padding_mask=None, **kwargs):\n \"\"\"Compute scaled dot product attention.\n Args:\n query: Query tensor T X B X C\n key: Key tensor T X B X C\n value: Value tensor T X B X C\n pos_emb: Positional embedding tensor B X 2T-1 X C\n key_padding_mask: Mask tensor T X B\n Returns:\n torch.Tensor: Output tensor T X B X C.\n \"\"\"\n query = query.transpose(0, 1)\n key = key.transpose(0, 1)\n value = value.transpose(0, 1)\n pos_emb = pos_emb.transpose(0, 1)\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, 2*time1-1)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n scores = self.forward_attention(v, scores, key_padding_mask)\n scores = scores.transpose(0, 1)\n return scores, None\n\n\nclass RotaryPositionMultiHeadedAttention(ESPNETMultiHeadedAttention):\n def __init__(\n self,\n n_feat,\n n_head,\n dropout,\n precision,\n rotary_emd_base=10000,\n ):\n \"\"\"Construct an RotaryPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_feat, n_head, dropout)\n precision = torch.float\n self.rotary_ndims = self.d_k # also try self.d_k//2\n if precision == \"fp16\":\n precision = torch.half\n\n self.rotary_emb = RotaryPositionalEmbedding(\n self.rotary_ndims, base=rotary_emd_base, precision=precision\n )\n\n def forward(self, query, key, value, key_padding_mask=None, **kwargs):\n \"\"\"Compute rotary position attention.\n Args:\n query: Query tensor T X B X C\n key: Key tensor T X B X C\n value: Value tensor T X B X C\n key_padding_mask: Mask tensor T X B\n Returns:\n torch.Tensor: Output tensor T X B X D.\n Notes:\n Assumes self attn\n \"\"\"\n\n T, B, C = value.size()\n query = query.view(T, B, self.h, self.d_k)\n key = key.view(T, B, self.h, self.d_k)\n value = value.view(T, B, self.h, self.d_k)\n cos, sin = self.rotary_emb(value, seq_len=T)\n query, key = apply_rotary_pos_emb(\n query, key, cos, sin, offset=0\n ) # offset is based on layer_past\n\n query = query.view(T, B, self.h * self.d_k)\n key = key.view(T, B, self.h * self.d_k)\n value = value.view(T, B, self.h * self.d_k)\n\n # TBD to BTD\n query = query.transpose(0, 1)\n key = key.transpose(0, 1)\n value = value.transpose(0, 1)\n\n q, k, v = self.forward_qkv(query, key, value)\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n scores = self.forward_attention(v, scores, key_padding_mask)\n scores = scores.transpose(0, 1)\n return scores, None\n", "path": "fairseq/modules/espnet_multihead_attention.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Shigeki Karita\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Multi-Head Attention layer definition.\"\"\"\n\nimport math\nimport torch\nfrom 
torch import nn\nfrom fairseq.modules.rotary_positional_embedding import (\n RotaryPositionalEmbedding,\n apply_rotary_pos_emb,\n)\n\n\nclass ESPNETMultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n Args:\n n_head: The number of heads.\n n_feat: The number of features.\n dropout: Dropout rate.\n \"\"\"\n\n def __init__(self, n_feat, n_head, dropout):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super(ESPNETMultiHeadedAttention, self).__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n self.linear_k = nn.Linear(n_feat, n_feat)\n self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n\n def forward_qkv(self, query, key, value, **kwargs):\n \"\"\"Transform query, key and value.\n Args:\n query: Query tensor B X T1 X C\n key: Key tensor B X T2 X C\n value: Value tensor B X T2 X C\n Returns:\n torch.Tensor: Transformed query tensor B X n_head X T1 X d_k\n torch.Tensor: Transformed key tensor B X n_head X T2 X d_k\n torch.Tensor: Transformed value tensor B X n_head X T2 X d_k\n \"\"\"\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\n return q, k, v\n\n def forward_attention(self, value, scores, mask):\n \"\"\"Compute attention context vector.\n Args:\n value: Transformed value B X n_head X T2 X d_k.\n scores: Attention score B X n_head X T1 X T2\n mask: Mask T2 X B\n Returns:\n torch.Tensor: Transformed value B X T1 X d_model\n weighted by the attention score B X T1 X T2\n \"\"\"\n n_batch = value.size(0)\n if mask is not None:\n scores = scores.masked_fill(\n mask.unsqueeze(1).unsqueeze(2).to(bool),\n float(\"-inf\"), # (batch, head, time1, time2)\n )\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n else:\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n p_attn = self.dropout(self.attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(self, query, key, value, key_padding_mask=None, **kwargs):\n \"\"\"Compute scaled dot product attention.\n Args:\n query (torch.Tensor): Query tensor T X B X C\n key (torch.Tensor): Key tensor T X B X C\n value (torch.Tensor): Value tensor T X B X C\n mask (torch.Tensor): Mask tensor T X B\n Returns:\n torch.Tensor: Output tensor T X B X D.\n \"\"\"\n query = query.transpose(0, 1)\n key = key.transpose(0, 1)\n value = value.transpose(0, 1)\n\n q, k, v = self.forward_qkv(query, key, value)\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n scores = self.forward_attention(v, scores, key_padding_mask)\n scores = scores.transpose(0, 1)\n return scores, None\n\n\nclass RelPositionMultiHeadedAttention(ESPNETMultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding.\n Paper: https://arxiv.org/abs/1901.02860\n Args:\n n_head: The number of heads.\n n_feat: The number of features.\n dropout: Dropout rate.\n zero_triu: 
Whether to zero the upper triangular part of attention matrix.\n \"\"\"\n\n def __init__(self, n_feat, n_head, dropout, zero_triu=False):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_feat, n_head, dropout)\n self.zero_triu = zero_triu\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.pos_bias_u = nn.Parameter(torch.zeros(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.zeros(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding.\n Args:\n x: Input tensor B X n_head X T X 2T-1\n Returns:\n torch.Tensor: Output tensor.\n \"\"\"\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)[\n :, :, :, : x.size(-1) // 2 + 1\n ] # only keep the positions from 0 to time2\n\n if self.zero_triu:\n ones = torch.ones((x.size(2), x.size(3)), device=x.device)\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward(self, query, key, value, pos_emb, key_padding_mask=None, **kwargs):\n \"\"\"Compute scaled dot product attention.\n Args:\n query: Query tensor T X B X C\n key: Key tensor T X B X C\n value: Value tensor T X B X C\n pos_emb: Positional embedding tensor B X 2T-1 X C\n key_padding_mask: Mask tensor T X B\n Returns:\n torch.Tensor: Output tensor T X B X C.\n \"\"\"\n query = query.transpose(0, 1)\n key = key.transpose(0, 1)\n value = value.transpose(0, 1)\n pos_emb = pos_emb.transpose(0, 1)\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, 2*time1-1)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n scores = self.forward_attention(v, scores, key_padding_mask)\n scores = scores.transpose(0, 1)\n return scores, None\n\n\nclass RotaryPositionMultiHeadedAttention(ESPNETMultiHeadedAttention):\n def __init__(\n self,\n n_feat,\n n_head,\n dropout,\n precision,\n rotary_emd_base=10000,\n ):\n \"\"\"Construct an RotaryPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_feat, n_head, dropout)\n precision = torch.float\n self.rotary_ndims = self.d_k # also try self.d_k//2\n if precision == \"fp16\":\n precision = torch.half\n\n self.rotary_emb = RotaryPositionalEmbedding(\n self.rotary_ndims, base=rotary_emd_base, precision=precision\n )\n\n def forward(self, query, key, value, key_padding_mask=None, 
**kwargs):\n \"\"\"Compute rotary position attention.\n Args:\n query: Query tensor T X B X C\n key: Key tensor T X B X C\n value: Value tensor T X B X C\n key_padding_mask: Mask tensor T X B\n Returns:\n torch.Tensor: Output tensor T X B X D.\n Notes:\n Assumes self attn\n \"\"\"\n\n T, B, C = value.size()\n query = query.view(T, B, self.h, self.d_k)\n key = key.view(T, B, self.h, self.d_k)\n value = value.view(T, B, self.h, self.d_k)\n cos, sin = self.rotary_emb(value, seq_len=T)\n query, key = apply_rotary_pos_emb(\n query, key, cos, sin, offset=0\n ) # offset is based on layer_past\n\n query = query.view(T, B, self.h * self.d_k)\n key = key.view(T, B, self.h * self.d_k)\n value = value.view(T, B, self.h * self.d_k)\n\n # TBD to BTD\n query = query.transpose(0, 1)\n key = key.transpose(0, 1)\n value = value.transpose(0, 1)\n\n q, k, v = self.forward_qkv(query, key, value)\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n scores = self.forward_attention(v, scores, key_padding_mask)\n scores = scores.transpose(0, 1)\n return scores, None\n", "path": "fairseq/modules/espnet_multihead_attention.py"}]} | 3,897 | 229 |
gh_patches_debug_4962 | rasdani/github-patches | git_diff | projectmesa__mesa-128 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Grid move_agent to same position
Grid's move_agent method errors out when trying to move the agent to the same position.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesa/space.py`
Content:
```
1 '''
2 Mesa Space Module
3 =================================
4
5 Objects used to add a spatial component to a model.
6
7 Grid: base grid, a simple list-of-lists.
8 SingleGrid: grid which strictly enforces one object per cell.
9 MultiGrid: extension to Grid where each cell is a set of objects.
10
11 '''
12 # Instruction for PyLint to suppress variable name errors, since we have a
13 # good reason to use one-character variable names for x and y.
14 # pylint: disable=invalid-name
15
16 import itertools
17 import random
18
19
20 RANDOM = -1
21
22 X = 0
23 Y = 1
24
25
26 class Grid(object):
27 '''
28 Base class for a square grid.
29
30 Grid cells are indexed by [y][x], where [0][0] is assumed to be -- top-left
31 and [height-1][width-1] is the bottom-right. If a grid is toroidal, the top
32 and bottom, and left and right, edges wrap to each other
33
34 Properties:
35 width, height: The grid's width and height.
36 torus: Boolean which determines whether to treat the grid as a torus.
37
38 grid: Internal list-of-lists which holds the grid cells themselves.
39 default_val: Lambda function to populate each grid cell with None.
40
41 Methods:
42 get_neighbors: Returns the objects surrounding a given cell.
43 get_neighborhood: Returns the cells surrounding a given cell.
44 get_cell_list_contents: Returns the contents of a list of cells
45 ((x,y) tuples)
46 '''
47
48 default_val = lambda s: None
49
50 class CoordIter:
51 """
52 An iterator that returns the coordinates of a cell along with its
53 contents.
54 """
55
56 def __init__(self, grid):
57 self.grid = grid
58 self.x = 0
59 self.y = 0
60
61 def __iter__(self):
62 return self
63
64 def __next__(self):
65 while self.y < self.grid.height:
66 while self.x < self.grid.width:
67 ret = [self.grid[self.y][self.x],
68 self.x, self.y]
69 self.x += 1
70 return ret
71 self.x = 0
72 self.y += 1
73 else:
74 raise StopIteration()
75
76 def __init__(self, height, width, torus):
77 '''
78 Create a new grid.
79
80 Args:
81 height, width: The height and width of the grid
82 torus: Boolean whether the grid wraps or not.
83 '''
84 self.height = height
85 self.width = width
86 self.torus = torus
87
88 self.grid = []
89 self.empties = []
90
91 for y in range(self.height):
92 row = []
93 for x in range(self.width):
94 row.append(self.default_val())
95 self.grid.append(row)
96
97 def __getitem__(self, index):
98 return self.grid[index]
99
100 def __iter__(self):
101 # create an iterator that chains the
102 # rows of grid together as if one list:
103 return itertools.chain(*self.grid)
104
105 def coord_iter(self):
106 """
107 An iterator that returns coordinates as well as cell contents.
108 """
109 return Grid.CoordIter(self)
110
111 def neighbor_iter(self, x, y, moore=True, torus=False):
112 """
113 Iterate over our neighbors.
114 """
115 neighbors = self.get_neighbors(x, y, moore=moore)
116 return iter(neighbors)
117
118 def get_neighborhood(self, x, y, moore,
119 include_center=False, radius=1):
120 """
121 Return a list of cells that are in the
122 neighborhood of a certain point.
123
124 Args:
125 x, y: Coordinates for the neighborhood to get.
126 moore: If True, return Moore neighborhood
127 (including diagonals)
128 If False, return Von Neumann neighborhood
129 (exclude diagonals)
130 include_center: If True, return the (x, y) cell as well.
131 Otherwise, return surrounding cells only.
132 radius: radius, in cells, of neighborhood to get.
133
134 Returns:
135 A list of coordinate tuples representing the neighborhood;
136 With radius 1, at most 9 if
137 Moore, 5 if Von Neumann
138 (8 and 4 if not including the center).
139 """
140 coordinates = set()
141 for dy in range(-radius, radius + 1):
142 for dx in range(-radius, radius + 1):
143 if dx == 0 and dy == 0 and not include_center:
144 continue
145 if not moore:
146 # Skip diagonals in Von Neumann neighborhood.
147 if dy != 0 and dx != 0:
148 continue
149
150 px = self.torus_adj(x + dx, self.width)
151 py = self.torus_adj(y + dy, self.height)
152
153 # Skip if new coords out of bounds.
154 if(self.out_of_bounds(px, py)):
155 continue
156
157 coordinates.add((px, py))
158 return list(coordinates)
159
160 def get_neighbors(self, x, y, moore,
161 include_center=False, radius=1):
162 """
163 Return a list of neighbors to a certain point.
164
165 Args:
166 x, y: Coordinates for the neighborhood to get.
167 moore: If True, return Moore neighborhood
168 (including diagonals)
169 If False, return Von Neumann neighborhood
170 (exclude diagonals)
171 include_center: If True, return the (x, y) cell as well.
172 Otherwise,
173 return surrounding cells only.
174 radius: radius, in cells, of neighborhood to get.
175
176 Returns:
177 A list of non-None objects in the given neighborhood;
178 at most 9 if Moore, 5 if Von-Neumann
179 (8 and 4 if not including the center).
180 """
181 neighborhood = self.get_neighborhood(x, y, moore,
182 include_center,
183 radius)
184 return self.get_cell_list_contents(neighborhood)
185
186 def torus_adj(self, coord, dim_len):
187 """
188 Convert coordinate, handling torus looping.
189 """
190 if self.torus:
191 coord %= dim_len
192 return coord
193
194 def out_of_bounds(self, x, y):
195 """
196 Is point x, y off the grid?
197 """
198 return(x < 0 or x >= self.width
199 or y < 0 or y >= self.height)
200
201 def get_cell_list_contents(self, cell_list):
202 '''
203 Args:
204 cell_list: Array-like of (x, y) tuples
205
206 Returns:
207 A list of the contents of the cells identified in cell_list
208 '''
209 contents = []
210 for x, y in cell_list:
211 self._add_members(contents, x, y)
212 return contents
213
214 def move_agent(self, agent, pos):
215 '''
216 Move an agent from its current position to a new position.
217
218 Args:
219 agent: Agent object to move. Assumed to have its current location
220 stored in a 'pos' tuple.
221 pos: Tuple of new position to move the agent to.
222 '''
223
224 self._place_agent(pos, agent)
225 self._remove_agent(agent.pos, agent)
226 agent.pos = pos
227
228 def place_agent(self, agent, pos):
229 '''
230 Position an agent on the grid, and set its pos variable.
231 '''
232 self._place_agent(pos, agent)
233 agent.pos = pos
234
235 def _place_agent(self, coords, agent):
236 '''
237 Place the agent at the correct location.
238 '''
239 x, y = coords
240 self.grid[y][x] = agent
241
242 def _remove_agent(self, coords, agent):
243 '''
244 Remove the agent from the given location.
245 '''
246 x, y = coords
247 self.grid[y][x] = None
248
249 def _add_members(self, target_list, x, y):
250 '''
251 Helper method to append the contents of a cell to the given list.
252 Override for other grid types.
253 '''
254 if self.grid[y][x] is not None:
255 target_list.append(self.grid[y][x])
256
257 def is_cell_empty(self, coords):
258 x, y = coords
259 return True if self.grid[y][x] == self.default_val() else False
260
261
262 class SingleGrid(Grid):
263 '''
264 Grid where each cell contains exactly at most one object.
265 '''
266
267 def __init__(self, height, width, torus):
268 '''
269 Create a new single-item grid.
270
271 Args:
272 height, width: The height and width of the grid
273 torus: Boolean whether the grid wraps or not.
274 '''
275 super().__init__(height, width, torus)
276 # Add all cells to the empties list.
277 self.empties = list(itertools.product(
278 *(range(self.width), range(self.height))))
279
280 def move_to_empty(self, agent):
281 """
282 Moves agent to a random empty cell, vacating agent's old cell.
283 """
284 coords = agent.pos
285 new_coords = self.find_empty()
286 if new_coords is None:
287 raise Exception("ERROR: No empty cells")
288 else:
289 self._place_agent(new_coords, agent)
290 agent.pos = new_coords
291 self._remove_agent(coords, agent)
292
293 def find_empty(self):
294 '''
295 Pick a random empty cell.
296 '''
297 if self.exists_empty_cells():
298 coords = random.choice(self.empties)
299 return coords
300 else:
301 return None
302
303 def exists_empty_cells(self):
304 """
305 Return True if any cells empty else False.
306 """
307 return len(self.empties) > 0
308
309 def position_agent(self, agent, x=RANDOM, y=RANDOM):
310 """
311 Position an agent on the grid.
312 This is used when first placing agents! Use 'move_to_empty()'
313 when you want agents to jump to an empty cell.
314 Use 'swap_pos()' to swap agents positions.
315 If x or y are positive, they are used, but if RANDOM,
316 we get a random position.
317 Ensure this random position is not occupied (in Grid).
318 """
319 if x == RANDOM or y == RANDOM:
320 coords = self.find_empty()
321 if coords is None:
322 raise Exception("ERROR: Grid full")
323 else:
324 coords = (x, y)
325 agent.pos = coords
326 self._place_agent(coords, agent)
327
328 def _place_agent(self, coords, agent):
329 if self.is_cell_empty(coords):
330 super()._place_agent(coords, agent)
331 self.empties.remove(coords)
332 else:
333 raise Exception("Cell not empty")
334
335 def _remove_agent(self, coords, agent):
336 super()._remove_agent(coords, agent)
337 self.empties.append(coords)
338
339
340 class MultiGrid(Grid):
341 '''
342 Grid where each cell can contain more than one object.
343
344 Grid cells are indexed by [y][x], where [0][0] is assumed to be -- top-left
345 and [height-1][width-1] is the bottom-right. If a grid is toroidal, the top
346 and bottom, and left and right, edges wrap to each other.
347
348 Each grid cell holds a set object.
349
350 Properties:
351 width, height: The grid's width and height.
352
353 torus: Boolean which determines whether to treat the grid as a torus.
354
355 grid: Internal list-of-lists which holds the grid cells themselves.
356 default_val: Lambda function to populate grid cells with an empty set.
357
358 Methods:
359 get_neighbors: Returns the objects surrounding a given cell.
360 '''
361
362 default_val = lambda s: set()
363
364 def _place_agent(self, coords, agent):
365 '''
366 Place the agent at the correct location.
367 '''
368 x, y = coords
369 self.grid[y][x].add(agent)
370
371 def _remove_agent(self, coords, agent):
372 '''
373 Remove the agent from the given location.
374 '''
375 x, y = coords
376 self.grid[y][x].remove(agent)
377
378 def _add_members(self, target_list, x, y):
379 '''
380 Helper method to add all objects in the given cell to the target_list.
381 '''
382 for a in self.grid[y][x]:
383 target_list.append(a)
384
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mesa/space.py b/mesa/space.py
--- a/mesa/space.py
+++ b/mesa/space.py
@@ -220,9 +220,8 @@
stored in a 'pos' tuple.
pos: Tuple of new position to move the agent to.
'''
-
- self._place_agent(pos, agent)
self._remove_agent(agent.pos, agent)
+ self._place_agent(pos, agent)
agent.pos = pos
def place_agent(self, agent, pos):
| {"golden_diff": "diff --git a/mesa/space.py b/mesa/space.py\n--- a/mesa/space.py\n+++ b/mesa/space.py\n@@ -220,9 +220,8 @@\n stored in a 'pos' tuple.\n pos: Tuple of new position to move the agent to.\n '''\n-\n- self._place_agent(pos, agent)\n self._remove_agent(agent.pos, agent)\n+ self._place_agent(pos, agent)\n agent.pos = pos\n \n def place_agent(self, agent, pos):\n", "issue": "Grid move_agent to same position\nGrid's move_agent method errors out when trying to move the agent to the same position.\n\n", "before_files": [{"content": "'''\nMesa Space Module\n=================================\n\nObjects used to add a spatial component to a model.\n\nGrid: base grid, a simple list-of-lists.\nSingleGrid: grid which strictly enforces one object per cell.\nMultiGrid: extension to Grid where each cell is a set of objects.\n\n'''\n# Instruction for PyLint to suppress variable name errors, since we have a\n# good reason to use one-character variable names for x and y.\n# pylint: disable=invalid-name\n\nimport itertools\nimport random\n\n\nRANDOM = -1\n\nX = 0\nY = 1\n\n\nclass Grid(object):\n '''\n Base class for a square grid.\n\n Grid cells are indexed by [y][x], where [0][0] is assumed to be -- top-left\n and [height-1][width-1] is the bottom-right. If a grid is toroidal, the top\n and bottom, and left and right, edges wrap to each other\n\n Properties:\n width, height: The grid's width and height.\n torus: Boolean which determines whether to treat the grid as a torus.\n\n grid: Internal list-of-lists which holds the grid cells themselves.\n default_val: Lambda function to populate each grid cell with None.\n\n Methods:\n get_neighbors: Returns the objects surrounding a given cell.\n get_neighborhood: Returns the cells surrounding a given cell.\n get_cell_list_contents: Returns the contents of a list of cells\n ((x,y) tuples)\n '''\n\n default_val = lambda s: None\n\n class CoordIter:\n \"\"\"\n An iterator that returns the coordinates of a cell along with its\n contents.\n \"\"\"\n\n def __init__(self, grid):\n self.grid = grid\n self.x = 0\n self.y = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n while self.y < self.grid.height:\n while self.x < self.grid.width:\n ret = [self.grid[self.y][self.x],\n self.x, self.y]\n self.x += 1\n return ret\n self.x = 0\n self.y += 1\n else:\n raise StopIteration()\n\n def __init__(self, height, width, torus):\n '''\n Create a new grid.\n\n Args:\n height, width: The height and width of the grid\n torus: Boolean whether the grid wraps or not.\n '''\n self.height = height\n self.width = width\n self.torus = torus\n\n self.grid = []\n self.empties = []\n\n for y in range(self.height):\n row = []\n for x in range(self.width):\n row.append(self.default_val())\n self.grid.append(row)\n\n def __getitem__(self, index):\n return self.grid[index]\n\n def __iter__(self):\n # create an iterator that chains the\n # rows of grid together as if one list:\n return itertools.chain(*self.grid)\n\n def coord_iter(self):\n \"\"\"\n An iterator that returns coordinates as well as cell contents.\n \"\"\"\n return Grid.CoordIter(self)\n\n def neighbor_iter(self, x, y, moore=True, torus=False):\n \"\"\"\n Iterate over our neighbors.\n \"\"\"\n neighbors = self.get_neighbors(x, y, moore=moore)\n return iter(neighbors)\n\n def get_neighborhood(self, x, y, moore,\n include_center=False, radius=1):\n \"\"\"\n Return a list of cells that are in the\n neighborhood of a certain point.\n\n Args:\n x, y: Coordinates for the neighborhood to get.\n moore: If True, 
return Moore neighborhood\n (including diagonals)\n If False, return Von Neumann neighborhood\n (exclude diagonals)\n include_center: If True, return the (x, y) cell as well.\n Otherwise, return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n A list of coordinate tuples representing the neighborhood;\n With radius 1, at most 9 if\n Moore, 5 if Von Neumann\n (8 and 4 if not including the center).\n \"\"\"\n coordinates = set()\n for dy in range(-radius, radius + 1):\n for dx in range(-radius, radius + 1):\n if dx == 0 and dy == 0 and not include_center:\n continue\n if not moore:\n # Skip diagonals in Von Neumann neighborhood.\n if dy != 0 and dx != 0:\n continue\n\n px = self.torus_adj(x + dx, self.width)\n py = self.torus_adj(y + dy, self.height)\n\n # Skip if new coords out of bounds.\n if(self.out_of_bounds(px, py)):\n continue\n\n coordinates.add((px, py))\n return list(coordinates)\n\n def get_neighbors(self, x, y, moore,\n include_center=False, radius=1):\n \"\"\"\n Return a list of neighbors to a certain point.\n\n Args:\n x, y: Coordinates for the neighborhood to get.\n moore: If True, return Moore neighborhood\n (including diagonals)\n If False, return Von Neumann neighborhood\n (exclude diagonals)\n include_center: If True, return the (x, y) cell as well.\n Otherwise,\n return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n A list of non-None objects in the given neighborhood;\n at most 9 if Moore, 5 if Von-Neumann\n (8 and 4 if not including the center).\n \"\"\"\n neighborhood = self.get_neighborhood(x, y, moore,\n include_center,\n radius)\n return self.get_cell_list_contents(neighborhood)\n\n def torus_adj(self, coord, dim_len):\n \"\"\"\n Convert coordinate, handling torus looping.\n \"\"\"\n if self.torus:\n coord %= dim_len\n return coord\n\n def out_of_bounds(self, x, y):\n \"\"\"\n Is point x, y off the grid?\n \"\"\"\n return(x < 0 or x >= self.width\n or y < 0 or y >= self.height)\n\n def get_cell_list_contents(self, cell_list):\n '''\n Args:\n cell_list: Array-like of (x, y) tuples\n\n Returns:\n A list of the contents of the cells identified in cell_list\n '''\n contents = []\n for x, y in cell_list:\n self._add_members(contents, x, y)\n return contents\n\n def move_agent(self, agent, pos):\n '''\n Move an agent from its current position to a new position.\n\n Args:\n agent: Agent object to move. 
Assumed to have its current location\n stored in a 'pos' tuple.\n pos: Tuple of new position to move the agent to.\n '''\n\n self._place_agent(pos, agent)\n self._remove_agent(agent.pos, agent)\n agent.pos = pos\n\n def place_agent(self, agent, pos):\n '''\n Position an agent on the grid, and set its pos variable.\n '''\n self._place_agent(pos, agent)\n agent.pos = pos\n\n def _place_agent(self, coords, agent):\n '''\n Place the agent at the correct location.\n '''\n x, y = coords\n self.grid[y][x] = agent\n\n def _remove_agent(self, coords, agent):\n '''\n Remove the agent from the given location.\n '''\n x, y = coords\n self.grid[y][x] = None\n\n def _add_members(self, target_list, x, y):\n '''\n Helper method to append the contents of a cell to the given list.\n Override for other grid types.\n '''\n if self.grid[y][x] is not None:\n target_list.append(self.grid[y][x])\n\n def is_cell_empty(self, coords):\n x, y = coords\n return True if self.grid[y][x] == self.default_val() else False\n\n\nclass SingleGrid(Grid):\n '''\n Grid where each cell contains exactly at most one object.\n '''\n\n def __init__(self, height, width, torus):\n '''\n Create a new single-item grid.\n\n Args:\n height, width: The height and width of the grid\n torus: Boolean whether the grid wraps or not.\n '''\n super().__init__(height, width, torus)\n # Add all cells to the empties list.\n self.empties = list(itertools.product(\n *(range(self.width), range(self.height))))\n\n def move_to_empty(self, agent):\n \"\"\"\n Moves agent to a random empty cell, vacating agent's old cell.\n \"\"\"\n coords = agent.pos\n new_coords = self.find_empty()\n if new_coords is None:\n raise Exception(\"ERROR: No empty cells\")\n else:\n self._place_agent(new_coords, agent)\n agent.pos = new_coords\n self._remove_agent(coords, agent)\n\n def find_empty(self):\n '''\n Pick a random empty cell.\n '''\n if self.exists_empty_cells():\n coords = random.choice(self.empties)\n return coords\n else:\n return None\n\n def exists_empty_cells(self):\n \"\"\"\n Return True if any cells empty else False.\n \"\"\"\n return len(self.empties) > 0\n\n def position_agent(self, agent, x=RANDOM, y=RANDOM):\n \"\"\"\n Position an agent on the grid.\n This is used when first placing agents! Use 'move_to_empty()'\n when you want agents to jump to an empty cell.\n Use 'swap_pos()' to swap agents positions.\n If x or y are positive, they are used, but if RANDOM,\n we get a random position.\n Ensure this random position is not occupied (in Grid).\n \"\"\"\n if x == RANDOM or y == RANDOM:\n coords = self.find_empty()\n if coords is None:\n raise Exception(\"ERROR: Grid full\")\n else:\n coords = (x, y)\n agent.pos = coords\n self._place_agent(coords, agent)\n\n def _place_agent(self, coords, agent):\n if self.is_cell_empty(coords):\n super()._place_agent(coords, agent)\n self.empties.remove(coords)\n else:\n raise Exception(\"Cell not empty\")\n\n def _remove_agent(self, coords, agent):\n super()._remove_agent(coords, agent)\n self.empties.append(coords)\n\n\nclass MultiGrid(Grid):\n '''\n Grid where each cell can contain more than one object.\n\n Grid cells are indexed by [y][x], where [0][0] is assumed to be -- top-left\n and [height-1][width-1] is the bottom-right. 
If a grid is toroidal, the top\n and bottom, and left and right, edges wrap to each other.\n\n Each grid cell holds a set object.\n\n Properties:\n width, height: The grid's width and height.\n\n torus: Boolean which determines whether to treat the grid as a torus.\n\n grid: Internal list-of-lists which holds the grid cells themselves.\n default_val: Lambda function to populate grid cells with an empty set.\n\n Methods:\n get_neighbors: Returns the objects surrounding a given cell.\n '''\n\n default_val = lambda s: set()\n\n def _place_agent(self, coords, agent):\n '''\n Place the agent at the correct location.\n '''\n x, y = coords\n self.grid[y][x].add(agent)\n\n def _remove_agent(self, coords, agent):\n '''\n Remove the agent from the given location.\n '''\n x, y = coords\n self.grid[y][x].remove(agent)\n\n def _add_members(self, target_list, x, y):\n '''\n Helper method to add all objects in the given cell to the target_list.\n '''\n for a in self.grid[y][x]:\n target_list.append(a)\n", "path": "mesa/space.py"}], "after_files": [{"content": "'''\nMesa Space Module\n=================================\n\nObjects used to add a spatial component to a model.\n\nGrid: base grid, a simple list-of-lists.\nSingleGrid: grid which strictly enforces one object per cell.\nMultiGrid: extension to Grid where each cell is a set of objects.\n\n'''\n# Instruction for PyLint to suppress variable name errors, since we have a\n# good reason to use one-character variable names for x and y.\n# pylint: disable=invalid-name\n\nimport itertools\nimport random\n\n\nRANDOM = -1\n\nX = 0\nY = 1\n\n\nclass Grid(object):\n '''\n Base class for a square grid.\n\n Grid cells are indexed by [y][x], where [0][0] is assumed to be -- top-left\n and [height-1][width-1] is the bottom-right. 
If a grid is toroidal, the top\n and bottom, and left and right, edges wrap to each other\n\n Properties:\n width, height: The grid's width and height.\n torus: Boolean which determines whether to treat the grid as a torus.\n\n grid: Internal list-of-lists which holds the grid cells themselves.\n default_val: Lambda function to populate each grid cell with None.\n\n Methods:\n get_neighbors: Returns the objects surrounding a given cell.\n get_neighborhood: Returns the cells surrounding a given cell.\n get_cell_list_contents: Returns the contents of a list of cells\n ((x,y) tuples)\n '''\n\n default_val = lambda s: None\n\n class CoordIter:\n \"\"\"\n An iterator that returns the coordinates of a cell along with its\n contents.\n \"\"\"\n\n def __init__(self, grid):\n self.grid = grid\n self.x = 0\n self.y = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n while self.y < self.grid.height:\n while self.x < self.grid.width:\n ret = [self.grid[self.y][self.x],\n self.x, self.y]\n self.x += 1\n return ret\n self.x = 0\n self.y += 1\n else:\n raise StopIteration()\n\n def __init__(self, height, width, torus):\n '''\n Create a new grid.\n\n Args:\n height, width: The height and width of the grid\n torus: Boolean whether the grid wraps or not.\n '''\n self.height = height\n self.width = width\n self.torus = torus\n\n self.grid = []\n self.empties = []\n\n for y in range(self.height):\n row = []\n for x in range(self.width):\n row.append(self.default_val())\n self.grid.append(row)\n\n def __getitem__(self, index):\n return self.grid[index]\n\n def __iter__(self):\n # create an iterator that chains the\n # rows of grid together as if one list:\n return itertools.chain(*self.grid)\n\n def coord_iter(self):\n \"\"\"\n An iterator that returns coordinates as well as cell contents.\n \"\"\"\n return Grid.CoordIter(self)\n\n def neighbor_iter(self, x, y, moore=True, torus=False):\n \"\"\"\n Iterate over our neighbors.\n \"\"\"\n neighbors = self.get_neighbors(x, y, moore=moore)\n return iter(neighbors)\n\n def get_neighborhood(self, x, y, moore,\n include_center=False, radius=1):\n \"\"\"\n Return a list of cells that are in the\n neighborhood of a certain point.\n\n Args:\n x, y: Coordinates for the neighborhood to get.\n moore: If True, return Moore neighborhood\n (including diagonals)\n If False, return Von Neumann neighborhood\n (exclude diagonals)\n include_center: If True, return the (x, y) cell as well.\n Otherwise, return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n A list of coordinate tuples representing the neighborhood;\n With radius 1, at most 9 if\n Moore, 5 if Von Neumann\n (8 and 4 if not including the center).\n \"\"\"\n coordinates = set()\n for dy in range(-radius, radius + 1):\n for dx in range(-radius, radius + 1):\n if dx == 0 and dy == 0 and not include_center:\n continue\n if not moore:\n # Skip diagonals in Von Neumann neighborhood.\n if dy != 0 and dx != 0:\n continue\n\n px = self.torus_adj(x + dx, self.width)\n py = self.torus_adj(y + dy, self.height)\n\n # Skip if new coords out of bounds.\n if(self.out_of_bounds(px, py)):\n continue\n\n coordinates.add((px, py))\n return list(coordinates)\n\n def get_neighbors(self, x, y, moore,\n include_center=False, radius=1):\n \"\"\"\n Return a list of neighbors to a certain point.\n\n Args:\n x, y: Coordinates for the neighborhood to get.\n moore: If True, return Moore neighborhood\n (including diagonals)\n If False, return Von Neumann neighborhood\n (exclude diagonals)\n 
include_center: If True, return the (x, y) cell as well.\n Otherwise,\n return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n A list of non-None objects in the given neighborhood;\n at most 9 if Moore, 5 if Von-Neumann\n (8 and 4 if not including the center).\n \"\"\"\n neighborhood = self.get_neighborhood(x, y, moore,\n include_center,\n radius)\n return self.get_cell_list_contents(neighborhood)\n\n def torus_adj(self, coord, dim_len):\n \"\"\"\n Convert coordinate, handling torus looping.\n \"\"\"\n if self.torus:\n coord %= dim_len\n return coord\n\n def out_of_bounds(self, x, y):\n \"\"\"\n Is point x, y off the grid?\n \"\"\"\n return(x < 0 or x >= self.width\n or y < 0 or y >= self.height)\n\n def get_cell_list_contents(self, cell_list):\n '''\n Args:\n cell_list: Array-like of (x, y) tuples\n\n Returns:\n A list of the contents of the cells identified in cell_list\n '''\n contents = []\n for x, y in cell_list:\n self._add_members(contents, x, y)\n return contents\n\n def move_agent(self, agent, pos):\n '''\n Move an agent from its current position to a new position.\n\n Args:\n agent: Agent object to move. Assumed to have its current location\n stored in a 'pos' tuple.\n pos: Tuple of new position to move the agent to.\n '''\n self._remove_agent(agent.pos, agent)\n self._place_agent(pos, agent)\n agent.pos = pos\n\n def place_agent(self, agent, pos):\n '''\n Position an agent on the grid, and set its pos variable.\n '''\n self._place_agent(pos, agent)\n agent.pos = pos\n\n def _place_agent(self, coords, agent):\n '''\n Place the agent at the correct location.\n '''\n x, y = coords\n self.grid[y][x] = agent\n\n def _remove_agent(self, coords, agent):\n '''\n Remove the agent from the given location.\n '''\n x, y = coords\n self.grid[y][x] = None\n\n def _add_members(self, target_list, x, y):\n '''\n Helper method to append the contents of a cell to the given list.\n Override for other grid types.\n '''\n if self.grid[y][x] is not None:\n target_list.append(self.grid[y][x])\n\n def is_cell_empty(self, coords):\n x, y = coords\n return True if self.grid[y][x] == self.default_val() else False\n\n\nclass SingleGrid(Grid):\n '''\n Grid where each cell contains exactly at most one object.\n '''\n\n def __init__(self, height, width, torus):\n '''\n Create a new single-item grid.\n\n Args:\n height, width: The height and width of the grid\n torus: Boolean whether the grid wraps or not.\n '''\n super().__init__(height, width, torus)\n # Add all cells to the empties list.\n self.empties = list(itertools.product(\n *(range(self.width), range(self.height))))\n\n def move_to_empty(self, agent):\n \"\"\"\n Moves agent to a random empty cell, vacating agent's old cell.\n \"\"\"\n coords = agent.pos\n new_coords = self.find_empty()\n if new_coords is None:\n raise Exception(\"ERROR: No empty cells\")\n else:\n self._place_agent(new_coords, agent)\n agent.pos = new_coords\n self._remove_agent(coords, agent)\n\n def find_empty(self):\n '''\n Pick a random empty cell.\n '''\n if self.exists_empty_cells():\n coords = random.choice(self.empties)\n return coords\n else:\n return None\n\n def exists_empty_cells(self):\n \"\"\"\n Return True if any cells empty else False.\n \"\"\"\n return len(self.empties) > 0\n\n def position_agent(self, agent, x=RANDOM, y=RANDOM):\n \"\"\"\n Position an agent on the grid.\n This is used when first placing agents! 
Use 'move_to_empty()'\n when you want agents to jump to an empty cell.\n Use 'swap_pos()' to swap agents positions.\n If x or y are positive, they are used, but if RANDOM,\n we get a random position.\n Ensure this random position is not occupied (in Grid).\n \"\"\"\n if x == RANDOM or y == RANDOM:\n coords = self.find_empty()\n if coords is None:\n raise Exception(\"ERROR: Grid full\")\n else:\n coords = (x, y)\n agent.pos = coords\n self._place_agent(coords, agent)\n\n def _place_agent(self, coords, agent):\n if self.is_cell_empty(coords):\n super()._place_agent(coords, agent)\n self.empties.remove(coords)\n else:\n raise Exception(\"Cell not empty\")\n\n def _remove_agent(self, coords, agent):\n super()._remove_agent(coords, agent)\n self.empties.append(coords)\n\n\nclass MultiGrid(Grid):\n '''\n Grid where each cell can contain more than one object.\n\n Grid cells are indexed by [y][x], where [0][0] is assumed to be -- top-left\n and [height-1][width-1] is the bottom-right. If a grid is toroidal, the top\n and bottom, and left and right, edges wrap to each other.\n\n Each grid cell holds a set object.\n\n Properties:\n width, height: The grid's width and height.\n\n torus: Boolean which determines whether to treat the grid as a torus.\n\n grid: Internal list-of-lists which holds the grid cells themselves.\n default_val: Lambda function to populate grid cells with an empty set.\n\n Methods:\n get_neighbors: Returns the objects surrounding a given cell.\n '''\n\n default_val = lambda s: set()\n\n def _place_agent(self, coords, agent):\n '''\n Place the agent at the correct location.\n '''\n x, y = coords\n self.grid[y][x].add(agent)\n\n def _remove_agent(self, coords, agent):\n '''\n Remove the agent from the given location.\n '''\n x, y = coords\n self.grid[y][x].remove(agent)\n\n def _add_members(self, target_list, x, y):\n '''\n Helper method to add all objects in the given cell to the target_list.\n '''\n for a in self.grid[y][x]:\n target_list.append(a)\n", "path": "mesa/space.py"}]} | 3,950 | 117 |
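For the `Grid.move_agent` patch above, the order of the two helper calls is the whole fix: with the original place-then-remove sequence, moving an agent onto the cell it already occupies first places it and then immediately clears that same cell (and `SingleGrid._place_agent` refuses outright, since the target cell is not empty). A minimal sketch of the two orderings, using a plain dict as a stand-in for the real mesa grid classes:

```python
# Hypothetical stand-in grid: a single cell of interest, keyed by (x, y).
grid = {(1, 1): "agent"}

def move_buggy(old, new):
    grid[new] = "agent"   # place first ...
    grid[old] = None      # ... then remove; when old == new this erases the placement

def move_fixed(old, new):
    grid[old] = None      # remove first, as in the patched Grid.move_agent
    grid[new] = "agent"   # then place; a move onto the same cell is now a no-op

move_buggy((1, 1), (1, 1))
print(grid[(1, 1)])       # None -- the agent was silently dropped

grid[(1, 1)] = "agent"    # reset
move_fixed((1, 1), (1, 1))
print(grid[(1, 1)])       # "agent" -- still in place
```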
gh_patches_debug_14688 | rasdani/github-patches | git_diff | yt-project__yt-3569 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OffAxisProjectionPlot Failed when Log Level Set to Debug
### Bug report
**Bug summary**
If the log level is set to `debug`, then `OffAxisProjectionPlot` fails when following the tutorial at [website](https://yt-project.org/doc/visualizing/plots.html#off-axis-projection-plots).
Although this can be avoided by setting the log level to `info`, it is kind of annoying when debugging.
**Code for reproduction**
```python
import yt
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
L = [1, 1, 0] # vector normal to cutting plane
north_vector = [-1, 1, 0]
prj = yt.OffAxisProjectionPlot(
ds, L, ("gas", "density"), width=(25, "kpc"), north_vector=north_vector
)
prj.save()
```
**Actual outcome**
```
yt : [DEBUG ] 2021-10-15 13:54:47,845 Setting normalized vectors [-0.5 -0.5 -0.5] dimensionless None
Traceback (most recent call last):
File "OffAxisProjectionPlot.py", line 6, in <module>
prj = yt.OffAxisProjectionPlot(
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py", line 2254, in __init__
PWViewerMPL.__init__(
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py", line 873, in __init__
PlotWindow.__init__(self, *args, **kwargs)
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py", line 257, in __init__
self._setup_plots()
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py", line 994, in _setup_plots
self._recreate_frb()
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py", line 317, in _recreate_frb
self._frb._get_data_source_fields()
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/fixed_resolution.py", line 176, in _get_data_source_fields
self[f]
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/fixed_resolution.py", line 602, in __getitem__
buff = off_axis_projection(
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/off_axis_projection.py", line 341, in off_axis_projection
camera.set_width(width)
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py", line 374, in set_width
self.width = width
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py", line 208, in fset
self.switch_orientation()
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py", line 441, in switch_orientation
self._setup_normalized_vectors(normal_vector, north_vector)
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/utilities/orientation.py", line 65, in _setup_normalized_vectors
mylog.debug("Setting normalized vectors %s %s", normal_vector, north_vector)
File "/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py", line 1422, in debug
self._log(DEBUG, msg, args, **kwargs)
File "/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py", line 1577, in _log
self.handle(record)
File "/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py", line 1586, in handle
if (not self.disabled) and self.filter(record):
File "/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py", line 807, in filter
result = f.filter(record)
File "/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/utilities/logger.py", line 78, in filter
if current_log != getattr(self, "last_log", None):
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```
**Expected outcome**

**Version Information**
* Operating System: Ubuntu 20.04
* Python Version: 3.8
* yt version: 4.1.dev `yt/main` commit `e387a2`
* Install from source code via pip.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt/utilities/orientation.py`
Content:
```
1 import numpy as np
2
3 from yt.funcs import mylog
4 from yt.units.yt_array import YTArray
5 from yt.utilities.exceptions import YTException
6
7
8 def _aligned(a, b):
9 aligned_component = np.abs(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))
10 return np.isclose(aligned_component, 1.0, 1.0e-13)
11
12
13 def _validate_unit_vectors(normal_vector, north_vector):
14
15 # Make sure vectors are unitless
16 if north_vector is not None:
17 north_vector = YTArray(north_vector, "", dtype="float64")
18 if normal_vector is not None:
19 normal_vector = YTArray(normal_vector, "", dtype="float64")
20
21 if not np.dot(normal_vector, normal_vector) > 0:
22 raise YTException("normal_vector cannot be the zero vector.")
23 if north_vector is not None and _aligned(north_vector, normal_vector):
24 raise YTException("normal_vector and north_vector cannot be aligned.")
25
26 return normal_vector, north_vector
27
28
29 class Orientation:
30 def __init__(self, normal_vector, north_vector=None, steady_north=False):
31 r"""An object that returns a set of basis vectors for orienting
32 cameras a data containers.
33
34 Parameters
35 ----------
36 normal_vector : array_like
37 A vector normal to the image plane
38 north_vector : array_like, optional
39 The 'up' direction to orient the image plane.
40 If not specified, gets calculated automatically
41 steady_north : bool, optional
42 Boolean to control whether to normalize the north_vector
43 by subtracting off the dot product of it and the normal
44 vector. Makes it easier to do rotations along a single
45 axis. If north_vector is specified, is switched to
46 True. Default: False
47
48 """
49
50 normal_vector, north_vector = _validate_unit_vectors(
51 normal_vector, north_vector
52 )
53 self.steady_north = steady_north
54 if north_vector is not None:
55 self.steady_north = True
56 self.north_vector = north_vector
57 self._setup_normalized_vectors(normal_vector, north_vector)
58 if self.north_vector is None:
59 self.north_vector = self.unit_vectors[1]
60
61 def _setup_normalized_vectors(self, normal_vector, north_vector):
62 normal_vector, north_vector = _validate_unit_vectors(
63 normal_vector, north_vector
64 )
65 mylog.debug("Setting normalized vectors %s %s", normal_vector, north_vector)
66 # Now we set up our various vectors
67 normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))
68 if north_vector is None:
69 vecs = np.identity(3)
70 t = np.cross(normal_vector, vecs).sum(axis=1)
71 ax = t.argmax()
72 east_vector = np.cross(vecs[ax, :], normal_vector).ravel()
73 # self.north_vector must remain None otherwise rotations about a fixed axis
74 # will break. The north_vector calculated here will still be included
75 # in self.unit_vectors.
76 north_vector = np.cross(normal_vector, east_vector).ravel()
77 else:
78 if self.steady_north or (np.dot(north_vector, normal_vector) != 0.0):
79 north_vector = (
80 north_vector - np.dot(north_vector, normal_vector) * normal_vector
81 )
82 east_vector = np.cross(north_vector, normal_vector).ravel()
83 north_vector /= np.sqrt(np.dot(north_vector, north_vector))
84 east_vector /= np.sqrt(np.dot(east_vector, east_vector))
85 self.normal_vector = normal_vector
86 self.north_vector = north_vector
87 self.unit_vectors = YTArray([east_vector, north_vector, normal_vector], "")
88 self.inv_mat = np.linalg.pinv(self.unit_vectors)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt/utilities/orientation.py b/yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -1,6 +1,5 @@
import numpy as np
-from yt.funcs import mylog
from yt.units.yt_array import YTArray
from yt.utilities.exceptions import YTException
@@ -62,7 +61,6 @@
normal_vector, north_vector = _validate_unit_vectors(
normal_vector, north_vector
)
- mylog.debug("Setting normalized vectors %s %s", normal_vector, north_vector)
# Now we set up our various vectors
normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))
if north_vector is None:
| {"golden_diff": "diff --git a/yt/utilities/orientation.py b/yt/utilities/orientation.py\n--- a/yt/utilities/orientation.py\n+++ b/yt/utilities/orientation.py\n@@ -1,6 +1,5 @@\n import numpy as np\n \n-from yt.funcs import mylog\n from yt.units.yt_array import YTArray\n from yt.utilities.exceptions import YTException\n \n@@ -62,7 +61,6 @@\n normal_vector, north_vector = _validate_unit_vectors(\n normal_vector, north_vector\n )\n- mylog.debug(\"Setting normalized vectors %s %s\", normal_vector, north_vector)\n # Now we set up our various vectors\n normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))\n if north_vector is None:\n", "issue": "OffAxisProjectionPlot Failed when Log Level Set to Debug\n### Bug report\r\n\r\n**Bug summary**\r\nIf log level set to `debug`, then `OffAxisProjectionPlot` would fail when following the tutorial at [website](https://yt-project.org/doc/visualizing/plots.html#off-axis-projection-plots). \r\nAlhough this can be avoid by setting log level to `info`, it is kind of annoying when debugging.\r\n\r\n**Code for reproduction**\r\n\r\n```python\r\nimport yt\r\n\r\nds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\r\nL = [1, 1, 0] # vector normal to cutting plane\r\nnorth_vector = [-1, 1, 0]\r\nprj = yt.OffAxisProjectionPlot(\r\n ds, L, (\"gas\", \"density\"), width=(25, \"kpc\"), north_vector=north_vector\r\n)\r\nprj.save()\r\n```\r\n\r\n**Actual outcome**\r\n\r\n```\r\nyt : [DEBUG ] 2021-10-15 13:54:47,845 Setting normalized vectors [-0.5 -0.5 -0.5] dimensionless None\r\nTraceback (most recent call last):\r\n File \"OffAxisProjectionPlot.py\", line 6, in <module>\r\n prj = yt.OffAxisProjectionPlot(\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py\", line 2254, in __init__\r\n PWViewerMPL.__init__(\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py\", line 873, in __init__\r\n PlotWindow.__init__(self, *args, **kwargs)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py\", line 257, in __init__\r\n self._setup_plots()\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py\", line 994, in _setup_plots\r\n self._recreate_frb()\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/plot_window.py\", line 317, in _recreate_frb\r\n self._frb._get_data_source_fields()\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/fixed_resolution.py\", line 176, in _get_data_source_fields\r\n self[f]\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/fixed_resolution.py\", line 602, in __getitem__\r\n buff = off_axis_projection(\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/off_axis_projection.py\", line 341, in off_axis_projection\r\n camera.set_width(width)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py\", line 374, in set_width\r\n self.width = width\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py\", line 208, in fset\r\n self.switch_orientation()\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/visualization/volume_rendering/camera.py\", line 
441, in switch_orientation\r\n self._setup_normalized_vectors(normal_vector, north_vector)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/utilities/orientation.py\", line 65, in _setup_normalized_vectors\r\n mylog.debug(\"Setting normalized vectors %s %s\", normal_vector, north_vector)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py\", line 1422, in debug\r\n self._log(DEBUG, msg, args, **kwargs)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py\", line 1577, in _log\r\n self.handle(record)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py\", line 1586, in handle\r\n if (not self.disabled) and self.filter(record):\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/logging/__init__.py\", line 807, in filter\r\n result = f.filter(record)\r\n File \"/home/calab912/software/python/python3.8/lib/python3.8/site-packages/yt/utilities/logger.py\", line 78, in filter\r\n if current_log != getattr(self, \"last_log\", None):\r\nValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()\r\n```\r\n\r\n**Expected outcome**\r\n\r\n\r\n\r\n**Version Information**\r\n * Operating System: Ubuntu 20.04\r\n * Python Version: 3.8\r\n * yt version: 4.1.dev `yt/main` commit `e387a2`\r\n * Install from source code via pip.\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom yt.funcs import mylog\nfrom yt.units.yt_array import YTArray\nfrom yt.utilities.exceptions import YTException\n\n\ndef _aligned(a, b):\n aligned_component = np.abs(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))\n return np.isclose(aligned_component, 1.0, 1.0e-13)\n\n\ndef _validate_unit_vectors(normal_vector, north_vector):\n\n # Make sure vectors are unitless\n if north_vector is not None:\n north_vector = YTArray(north_vector, \"\", dtype=\"float64\")\n if normal_vector is not None:\n normal_vector = YTArray(normal_vector, \"\", dtype=\"float64\")\n\n if not np.dot(normal_vector, normal_vector) > 0:\n raise YTException(\"normal_vector cannot be the zero vector.\")\n if north_vector is not None and _aligned(north_vector, normal_vector):\n raise YTException(\"normal_vector and north_vector cannot be aligned.\")\n\n return normal_vector, north_vector\n\n\nclass Orientation:\n def __init__(self, normal_vector, north_vector=None, steady_north=False):\n r\"\"\"An object that returns a set of basis vectors for orienting\n cameras a data containers.\n\n Parameters\n ----------\n normal_vector : array_like\n A vector normal to the image plane\n north_vector : array_like, optional\n The 'up' direction to orient the image plane.\n If not specified, gets calculated automatically\n steady_north : bool, optional\n Boolean to control whether to normalize the north_vector\n by subtracting off the dot product of it and the normal\n vector. Makes it easier to do rotations along a single\n axis. If north_vector is specified, is switched to\n True. 
Default: False\n\n \"\"\"\n\n normal_vector, north_vector = _validate_unit_vectors(\n normal_vector, north_vector\n )\n self.steady_north = steady_north\n if north_vector is not None:\n self.steady_north = True\n self.north_vector = north_vector\n self._setup_normalized_vectors(normal_vector, north_vector)\n if self.north_vector is None:\n self.north_vector = self.unit_vectors[1]\n\n def _setup_normalized_vectors(self, normal_vector, north_vector):\n normal_vector, north_vector = _validate_unit_vectors(\n normal_vector, north_vector\n )\n mylog.debug(\"Setting normalized vectors %s %s\", normal_vector, north_vector)\n # Now we set up our various vectors\n normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))\n if north_vector is None:\n vecs = np.identity(3)\n t = np.cross(normal_vector, vecs).sum(axis=1)\n ax = t.argmax()\n east_vector = np.cross(vecs[ax, :], normal_vector).ravel()\n # self.north_vector must remain None otherwise rotations about a fixed axis\n # will break. The north_vector calculated here will still be included\n # in self.unit_vectors.\n north_vector = np.cross(normal_vector, east_vector).ravel()\n else:\n if self.steady_north or (np.dot(north_vector, normal_vector) != 0.0):\n north_vector = (\n north_vector - np.dot(north_vector, normal_vector) * normal_vector\n )\n east_vector = np.cross(north_vector, normal_vector).ravel()\n north_vector /= np.sqrt(np.dot(north_vector, north_vector))\n east_vector /= np.sqrt(np.dot(east_vector, east_vector))\n self.normal_vector = normal_vector\n self.north_vector = north_vector\n self.unit_vectors = YTArray([east_vector, north_vector, normal_vector], \"\")\n self.inv_mat = np.linalg.pinv(self.unit_vectors)\n", "path": "yt/utilities/orientation.py"}], "after_files": [{"content": "import numpy as np\n\nfrom yt.units.yt_array import YTArray\nfrom yt.utilities.exceptions import YTException\n\n\ndef _aligned(a, b):\n aligned_component = np.abs(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))\n return np.isclose(aligned_component, 1.0, 1.0e-13)\n\n\ndef _validate_unit_vectors(normal_vector, north_vector):\n\n # Make sure vectors are unitless\n if north_vector is not None:\n north_vector = YTArray(north_vector, \"\", dtype=\"float64\")\n if normal_vector is not None:\n normal_vector = YTArray(normal_vector, \"\", dtype=\"float64\")\n\n if not np.dot(normal_vector, normal_vector) > 0:\n raise YTException(\"normal_vector cannot be the zero vector.\")\n if north_vector is not None and _aligned(north_vector, normal_vector):\n raise YTException(\"normal_vector and north_vector cannot be aligned.\")\n\n return normal_vector, north_vector\n\n\nclass Orientation:\n def __init__(self, normal_vector, north_vector=None, steady_north=False):\n r\"\"\"An object that returns a set of basis vectors for orienting\n cameras a data containers.\n\n Parameters\n ----------\n normal_vector : array_like\n A vector normal to the image plane\n north_vector : array_like, optional\n The 'up' direction to orient the image plane.\n If not specified, gets calculated automatically\n steady_north : bool, optional\n Boolean to control whether to normalize the north_vector\n by subtracting off the dot product of it and the normal\n vector. Makes it easier to do rotations along a single\n axis. If north_vector is specified, is switched to\n True. 
Default: False\n\n \"\"\"\n\n normal_vector, north_vector = _validate_unit_vectors(\n normal_vector, north_vector\n )\n self.steady_north = steady_north\n if north_vector is not None:\n self.steady_north = True\n self.north_vector = north_vector\n self._setup_normalized_vectors(normal_vector, north_vector)\n if self.north_vector is None:\n self.north_vector = self.unit_vectors[1]\n\n def _setup_normalized_vectors(self, normal_vector, north_vector):\n normal_vector, north_vector = _validate_unit_vectors(\n normal_vector, north_vector\n )\n # Now we set up our various vectors\n normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))\n if north_vector is None:\n vecs = np.identity(3)\n t = np.cross(normal_vector, vecs).sum(axis=1)\n ax = t.argmax()\n east_vector = np.cross(vecs[ax, :], normal_vector).ravel()\n # self.north_vector must remain None otherwise rotations about a fixed axis\n # will break. The north_vector calculated here will still be included\n # in self.unit_vectors.\n north_vector = np.cross(normal_vector, east_vector).ravel()\n else:\n if self.steady_north or (np.dot(north_vector, normal_vector) != 0.0):\n north_vector = (\n north_vector - np.dot(north_vector, normal_vector) * normal_vector\n )\n east_vector = np.cross(north_vector, normal_vector).ravel()\n north_vector /= np.sqrt(np.dot(north_vector, north_vector))\n east_vector /= np.sqrt(np.dot(east_vector, east_vector))\n self.normal_vector = normal_vector\n self.north_vector = north_vector\n self.unit_vectors = YTArray([east_vector, north_vector, normal_vector], \"\")\n self.inv_mat = np.linalg.pinv(self.unit_vectors)\n", "path": "yt/utilities/orientation.py"}]} | 2,598 | 169 |
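The traceback above bottoms out in yt's duplicate-log filter: the comparison `current_log != getattr(self, "last_log", None)` ends up touching the logged unyt/NumPy vectors, `!=` on an array is element-wise, and feeding the resulting array to `if` raises the ambiguity error; that is why the patch simply removes the `mylog.debug` call that handed arrays to the logger. A minimal reproduction of the failure mode, assuming plain NumPy arrays and a made-up layout for the stored log entries:

```python
import numpy as np

# Hypothetical stand-ins for two successive log entries whose arguments
# include a 3-vector (the real filter stores record fields; this layout is assumed).
last_log = ("Setting normalized vectors %s %s", (np.array([-0.5, -0.5, -0.5]), None))
current_log = ("Setting normalized vectors %s %s", (np.array([-0.5, -0.5, -0.5]), None))

try:
    if current_log != last_log:  # tuple comparison reaches the arrays -> element-wise !=
        print("new message, emit it")
except ValueError as err:
    print(err)  # "The truth value of an array with more than one element is ambiguous..."
```

Scalar log arguments compare to plain booleans, so the filter only trips on messages that interpolate arrays.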
gh_patches_debug_25364 | rasdani/github-patches | git_diff | pypa__setuptools-1312 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
find_packages() doesn't find PEP 420 packages
Originally reported by: **gwideman (Bitbucket: [gwideman](http://bitbucket.org/gwideman), GitHub: [gwideman](http://github.com/gwideman))**
---
```
#!python
setup(...
packages=find_packages(..)
)
```
on the developer machine will fail to find packages that lack a `__init__.py` file, as is allowed in Python 3.3. However, such packages, when listed explicitly (packages=['mypkg']), do appear to get included and later installed.
Note: When testing this, before each test be sure to delete all generated metadata, including that which setup may previously have placed in the original source directory, as it seems that setup may use metadata created on a previous run in order to include files.
This is part of a general problem reported in issue #83, but I've logged it separately as it's specifically about setuptools.
---
- Bitbucket: https://bitbucket.org/pypa/setuptools/issue/97
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/__init__.py`
Content:
```
1 """Extensions to the 'distutils' for large or complex distributions"""
2
3 import os
4 import functools
5 import distutils.core
6 import distutils.filelist
7 from distutils.util import convert_path
8 from fnmatch import fnmatchcase
9
10 from setuptools.extern.six.moves import filter, map
11
12 import setuptools.version
13 from setuptools.extension import Extension
14 from setuptools.dist import Distribution, Feature
15 from setuptools.depends import Require
16 from . import monkey
17
18 __metaclass__ = type
19
20 __all__ = [
21 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
22 'find_packages',
23 ]
24
25 __version__ = setuptools.version.__version__
26
27 bootstrap_install_from = None
28
29 # If we run 2to3 on .py files, should we also convert docstrings?
30 # Default: yes; assume that we can detect doctests reliably
31 run_2to3_on_doctests = True
32 # Standard package names for fixer packages
33 lib2to3_fixer_packages = ['lib2to3.fixes']
34
35
36 class PackageFinder:
37 """
38 Generate a list of all Python packages found within a directory
39 """
40
41 @classmethod
42 def find(cls, where='.', exclude=(), include=('*',)):
43 """Return a list all Python packages found within directory 'where'
44
45 'where' is the root directory which will be searched for packages. It
46 should be supplied as a "cross-platform" (i.e. URL-style) path; it will
47 be converted to the appropriate local path syntax.
48
49 'exclude' is a sequence of package names to exclude; '*' can be used
50 as a wildcard in the names, such that 'foo.*' will exclude all
51 subpackages of 'foo' (but not 'foo' itself).
52
53 'include' is a sequence of package names to include. If it's
54 specified, only the named packages will be included. If it's not
55 specified, all found packages will be included. 'include' can contain
56 shell style wildcard patterns just like 'exclude'.
57 """
58
59 return list(cls._find_packages_iter(
60 convert_path(where),
61 cls._build_filter('ez_setup', '*__pycache__', *exclude),
62 cls._build_filter(*include)))
63
64 @classmethod
65 def _find_packages_iter(cls, where, exclude, include):
66 """
67 All the packages found in 'where' that pass the 'include' filter, but
68 not the 'exclude' filter.
69 """
70 for root, dirs, files in os.walk(where, followlinks=True):
71 # Copy dirs to iterate over it, then empty dirs.
72 all_dirs = dirs[:]
73 dirs[:] = []
74
75 for dir in all_dirs:
76 full_path = os.path.join(root, dir)
77 rel_path = os.path.relpath(full_path, where)
78 package = rel_path.replace(os.path.sep, '.')
79
80 # Skip directory trees that are not valid packages
81 if ('.' in dir or not cls._looks_like_package(full_path)):
82 continue
83
84 # Should this package be included?
85 if include(package) and not exclude(package):
86 yield package
87
88 # Keep searching subdirectories, as there may be more packages
89 # down there, even if the parent was excluded.
90 dirs.append(dir)
91
92 @staticmethod
93 def _looks_like_package(path):
94 """Does a directory look like a package?"""
95 return os.path.isfile(os.path.join(path, '__init__.py'))
96
97 @staticmethod
98 def _build_filter(*patterns):
99 """
100 Given a list of patterns, return a callable that will be true only if
101 the input matches at least one of the patterns.
102 """
103 return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
104
105
106 class PEP420PackageFinder(PackageFinder):
107 @staticmethod
108 def _looks_like_package(path):
109 return True
110
111
112 find_packages = PackageFinder.find
113
114
115 def _install_setup_requires(attrs):
116 # Note: do not use `setuptools.Distribution` directly, as
117 # our PEP 517 backend patch `distutils.core.Distribution`.
118 dist = distutils.core.Distribution(dict(
119 (k, v) for k, v in attrs.items()
120 if k in ('dependency_links', 'setup_requires')
121 ))
122 # Honor setup.cfg's options.
123 dist.parse_config_files(ignore_option_errors=True)
124 if dist.setup_requires:
125 dist.fetch_build_eggs(dist.setup_requires)
126
127
128 def setup(**attrs):
129 # Make sure we have any requirements needed to interpret 'attrs'.
130 _install_setup_requires(attrs)
131 return distutils.core.setup(**attrs)
132
133 setup.__doc__ = distutils.core.setup.__doc__
134
135
136 _Command = monkey.get_unpatched(distutils.core.Command)
137
138
139 class Command(_Command):
140 __doc__ = _Command.__doc__
141
142 command_consumes_arguments = False
143
144 def __init__(self, dist, **kw):
145 """
146 Construct the command for dist, updating
147 vars(self) with any keyword parameters.
148 """
149 _Command.__init__(self, dist)
150 vars(self).update(kw)
151
152 def reinitialize_command(self, command, reinit_subcommands=0, **kw):
153 cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
154 vars(cmd).update(kw)
155 return cmd
156
157
158 def _find_all_simple(path):
159 """
160 Find all files under 'path'
161 """
162 results = (
163 os.path.join(base, file)
164 for base, dirs, files in os.walk(path, followlinks=True)
165 for file in files
166 )
167 return filter(os.path.isfile, results)
168
169
170 def findall(dir=os.curdir):
171 """
172 Find all files under 'dir' and return the list of full filenames.
173 Unless dir is '.', return full filenames with dir prepended.
174 """
175 files = _find_all_simple(dir)
176 if dir == os.curdir:
177 make_rel = functools.partial(os.path.relpath, start=dir)
178 files = map(make_rel, files)
179 return list(files)
180
181
182 monkey.patch_all()
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setuptools/__init__.py b/setuptools/__init__.py
--- a/setuptools/__init__.py
+++ b/setuptools/__init__.py
@@ -1,12 +1,14 @@
"""Extensions to the 'distutils' for large or complex distributions"""
import os
+import sys
import functools
import distutils.core
import distutils.filelist
from distutils.util import convert_path
from fnmatch import fnmatchcase
+from setuptools.extern.six import PY3
from setuptools.extern.six.moves import filter, map
import setuptools.version
@@ -17,11 +19,15 @@
__metaclass__ = type
+
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
- 'find_packages',
+ 'find_packages'
]
+if PY3:
+ __all__.append('find_packages_ns')
+
__version__ = setuptools.version.__version__
bootstrap_install_from = None
@@ -111,6 +117,9 @@
find_packages = PackageFinder.find
+if PY3:
+ find_packages_ns = PEP420PackageFinder.find
+
def _install_setup_requires(attrs):
# Note: do not use `setuptools.Distribution` directly, as
| {"golden_diff": "diff --git a/setuptools/__init__.py b/setuptools/__init__.py\n--- a/setuptools/__init__.py\n+++ b/setuptools/__init__.py\n@@ -1,12 +1,14 @@\n \"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n \n import os\n+import sys\n import functools\n import distutils.core\n import distutils.filelist\n from distutils.util import convert_path\n from fnmatch import fnmatchcase\n \n+from setuptools.extern.six import PY3\n from setuptools.extern.six.moves import filter, map\n \n import setuptools.version\n@@ -17,11 +19,15 @@\n \n __metaclass__ = type\n \n+\n __all__ = [\n 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',\n- 'find_packages',\n+ 'find_packages'\n ]\n \n+if PY3:\n+ __all__.append('find_packages_ns')\n+\n __version__ = setuptools.version.__version__\n \n bootstrap_install_from = None\n@@ -111,6 +117,9 @@\n \n find_packages = PackageFinder.find\n \n+if PY3:\n+ find_packages_ns = PEP420PackageFinder.find\n+\n \n def _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n", "issue": "find_packages() doesn't find PEP 420 packages\nOriginally reported by: **gwideman (Bitbucket: [gwideman](http://bitbucket.org/gwideman), GitHub: [gwideman](http://github.com/gwideman))**\n\n---\n\n```\n#!python\n\nsetup(...\n packages=find_packages(..)\n )\n```\n\non the developer machine will fail to find packages that lack a `__init__.py` file, as is allowed in Python 3.3. However, such packages listed explicitly: packages=['mypkg'] do appear to get included and later installed.\n\nNote: When testing this, before each test be sure to delete all generated metadata, including that which setup may previously have placed in the original source directory, as it seems that setup may use metadata created on a previous run in order to include files.\n\nThis is part of a general problem reported in issue #83, but I've logged it separately as it's specifically about setuptools.\n\n---\n- Bitbucket: https://bitbucket.org/pypa/setuptools/issue/97\n\n", "before_files": [{"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nimport os\nimport functools\nimport distutils.core\nimport distutils.filelist\nfrom distutils.util import convert_path\nfrom fnmatch import fnmatchcase\n\nfrom setuptools.extern.six.moves import filter, map\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution, Feature\nfrom setuptools.depends import Require\nfrom . import monkey\n\n__metaclass__ = type\n\n__all__ = [\n 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',\n 'find_packages',\n]\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n# If we run 2to3 on .py files, should we also convert docstrings?\n# Default: yes; assume that we can detect doctests reliably\nrun_2to3_on_doctests = True\n# Standard package names for fixer packages\nlib2to3_fixer_packages = ['lib2to3.fixes']\n\n\nclass PackageFinder:\n \"\"\"\n Generate a list of all Python packages found within a directory\n \"\"\"\n\n @classmethod\n def find(cls, where='.', exclude=(), include=('*',)):\n \"\"\"Return a list all Python packages found within directory 'where'\n\n 'where' is the root directory which will be searched for packages. It\n should be supplied as a \"cross-platform\" (i.e. 
URL-style) path; it will\n be converted to the appropriate local path syntax.\n\n 'exclude' is a sequence of package names to exclude; '*' can be used\n as a wildcard in the names, such that 'foo.*' will exclude all\n subpackages of 'foo' (but not 'foo' itself).\n\n 'include' is a sequence of package names to include. If it's\n specified, only the named packages will be included. If it's not\n specified, all found packages will be included. 'include' can contain\n shell style wildcard patterns just like 'exclude'.\n \"\"\"\n\n return list(cls._find_packages_iter(\n convert_path(where),\n cls._build_filter('ez_setup', '*__pycache__', *exclude),\n cls._build_filter(*include)))\n\n @classmethod\n def _find_packages_iter(cls, where, exclude, include):\n \"\"\"\n All the packages found in 'where' that pass the 'include' filter, but\n not the 'exclude' filter.\n \"\"\"\n for root, dirs, files in os.walk(where, followlinks=True):\n # Copy dirs to iterate over it, then empty dirs.\n all_dirs = dirs[:]\n dirs[:] = []\n\n for dir in all_dirs:\n full_path = os.path.join(root, dir)\n rel_path = os.path.relpath(full_path, where)\n package = rel_path.replace(os.path.sep, '.')\n\n # Skip directory trees that are not valid packages\n if ('.' in dir or not cls._looks_like_package(full_path)):\n continue\n\n # Should this package be included?\n if include(package) and not exclude(package):\n yield package\n\n # Keep searching subdirectories, as there may be more packages\n # down there, even if the parent was excluded.\n dirs.append(dir)\n\n @staticmethod\n def _looks_like_package(path):\n \"\"\"Does a directory look like a package?\"\"\"\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n @staticmethod\n def _build_filter(*patterns):\n \"\"\"\n Given a list of patterns, return a callable that will be true only if\n the input matches at least one of the patterns.\n \"\"\"\n return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)\n\n\nclass PEP420PackageFinder(PackageFinder):\n @staticmethod\n def _looks_like_package(path):\n return True\n\n\nfind_packages = PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n dist = distutils.core.Distribution(dict(\n (k, v) for k, v in attrs.items()\n if k in ('dependency_links', 'setup_requires')\n ))\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n _Command.__init__(self, dist)\n vars(self).update(kw)\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return 
filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}], "after_files": [{"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nimport os\nimport sys\nimport functools\nimport distutils.core\nimport distutils.filelist\nfrom distutils.util import convert_path\nfrom fnmatch import fnmatchcase\n\nfrom setuptools.extern.six import PY3\nfrom setuptools.extern.six.moves import filter, map\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution, Feature\nfrom setuptools.depends import Require\nfrom . import monkey\n\n__metaclass__ = type\n\n\n__all__ = [\n 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',\n 'find_packages'\n]\n\nif PY3:\n __all__.append('find_packages_ns')\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n# If we run 2to3 on .py files, should we also convert docstrings?\n# Default: yes; assume that we can detect doctests reliably\nrun_2to3_on_doctests = True\n# Standard package names for fixer packages\nlib2to3_fixer_packages = ['lib2to3.fixes']\n\n\nclass PackageFinder:\n \"\"\"\n Generate a list of all Python packages found within a directory\n \"\"\"\n\n @classmethod\n def find(cls, where='.', exclude=(), include=('*',)):\n \"\"\"Return a list all Python packages found within directory 'where'\n\n 'where' is the root directory which will be searched for packages. It\n should be supplied as a \"cross-platform\" (i.e. URL-style) path; it will\n be converted to the appropriate local path syntax.\n\n 'exclude' is a sequence of package names to exclude; '*' can be used\n as a wildcard in the names, such that 'foo.*' will exclude all\n subpackages of 'foo' (but not 'foo' itself).\n\n 'include' is a sequence of package names to include. If it's\n specified, only the named packages will be included. If it's not\n specified, all found packages will be included. 'include' can contain\n shell style wildcard patterns just like 'exclude'.\n \"\"\"\n\n return list(cls._find_packages_iter(\n convert_path(where),\n cls._build_filter('ez_setup', '*__pycache__', *exclude),\n cls._build_filter(*include)))\n\n @classmethod\n def _find_packages_iter(cls, where, exclude, include):\n \"\"\"\n All the packages found in 'where' that pass the 'include' filter, but\n not the 'exclude' filter.\n \"\"\"\n for root, dirs, files in os.walk(where, followlinks=True):\n # Copy dirs to iterate over it, then empty dirs.\n all_dirs = dirs[:]\n dirs[:] = []\n\n for dir in all_dirs:\n full_path = os.path.join(root, dir)\n rel_path = os.path.relpath(full_path, where)\n package = rel_path.replace(os.path.sep, '.')\n\n # Skip directory trees that are not valid packages\n if ('.' 
in dir or not cls._looks_like_package(full_path)):\n continue\n\n # Should this package be included?\n if include(package) and not exclude(package):\n yield package\n\n # Keep searching subdirectories, as there may be more packages\n # down there, even if the parent was excluded.\n dirs.append(dir)\n\n @staticmethod\n def _looks_like_package(path):\n \"\"\"Does a directory look like a package?\"\"\"\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n @staticmethod\n def _build_filter(*patterns):\n \"\"\"\n Given a list of patterns, return a callable that will be true only if\n the input matches at least one of the patterns.\n \"\"\"\n return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)\n\n\nclass PEP420PackageFinder(PackageFinder):\n @staticmethod\n def _looks_like_package(path):\n return True\n\n\nfind_packages = PackageFinder.find\n\nif PY3:\n find_packages_ns = PEP420PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n dist = distutils.core.Distribution(dict(\n (k, v) for k, v in attrs.items()\n if k in ('dependency_links', 'setup_requires')\n ))\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n _Command.__init__(self, dist)\n vars(self).update(kw)\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}]} | 2,243 | 287 |
gh_patches_debug_9979 | rasdani/github-patches | git_diff | open-mmlab__mmaction2-1355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ssn_head.py device unaligned
**Describe the bug**
I trained a SSN successfully on a new dataset. But when I test the checkpoint, an error was raised. And I think it's an inner bug.
**Reproduction**
1. What command or script did you run?
```
PYTHONPATH=$PWD:$PYTHONPATH mim test mmaction configs/localization/ssn/ssn_r50_450e_dfmad_rgb_test.py --checkpoint work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth --gpus 1 --eval mAP
```
**Environment**
```
sys.platform: linux
Python: 3.9.7 (default, Sep 16 2021, 13:09:58) [GCC 7.5.0]
CUDA available: True
GPU 0,1: NVIDIA GeForce RTX 2080 Ti
CUDA_HOME: /usr/local/cuda
NVCC: Build cuda_11.5.r11.5/compiler.30672275_0
GCC: gcc (Ubuntu 8.4.0-3ubuntu2) 8.4.0
PyTorch: 1.10.1
PyTorch compiling details: PyTorch built with:
- GCC 7.3
- C++ Version: 201402
- Intel(R) oneAPI Math Kernel Library Version 2021.4-Product Build 20210904 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v2.2.3 (Git Hash 7336ca9f055cf1bfa13efb658fe15dc9b41f0740)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- LAPACK is enabled (usually provided by MKL)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 11.3
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_37,code=compute_37
- CuDNN 8.2
- Magma 2.5.2
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.10.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON,
TorchVision: 0.11.2
OpenCV: 4.5.4
MMCV: 1.4.1
MMCV Compiler: GCC 7.3
MMCV CUDA Compiler: 11.3
MMAction2: 0.20.0+3758171
```
**Error traceback**
```shell
Testing command is python /home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py configs/localization/ssn/ssn_r50_450e_dfmad_rgb_test.py work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth --launcher none --eval mAP.
2021-12-24 12:03:40,264 - mmaction - INFO - 13 out of 13 videos are valid.
2021-12-24 12:03:40,290 - mmaction - INFO - SSNDataset: proposal file my_data/dfmad70/proposals/dfmad_bmn_test_100_proposal_list.txt parsed.
load checkpoint from local path: work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth
[ ] 0/13, elapsed: 0s, ETA:Traceback (most recent call last):
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py", line 365, in <module>
main()
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py", line 350, in main
outputs = inference_pytorch(args, cfg, distributed, data_loader)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py", line 161, in inference_pytorch
outputs = single_gpu_test(model, data_loader)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmcv/engine/test.py", line 33, in single_gpu_test
result = model(return_loss=False, **data)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmcv/parallel/data_parallel.py", line 50, in forward
return super().forward(*inputs, **kwargs)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/parallel/data_parallel.py", line 166, in forward
return self.module(*inputs[0], **kwargs[0])
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/models/localizers/base.py", line 166, in forward
return self.forward_test(*args, **kwargs)
File "/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/models/localizers/ssn.py", line 120, in forward_test
bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
```
**Bug fix**
The bug comes from here:
https://github.com/open-mmlab/mmaction2/blob/6f98109b682c5689dc0548a98b89ed1c3b956ddb/mmaction/models/localizers/ssn.py#L114-L121
We can see that all tensors are converted to ``cpu().numpy()`` except ``reg_norm_consts``.
Adding the line below solved the error:
```python
reg_norm_consts = reg_norm_consts.cpu().numpy()
```
I'd like to create a PR if needed.
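For clarity, here is a sketch of how the patched block in ``forward_test`` could look. It is just the existing lines from the location linked above plus the one added conversion, not a standalone runnable snippet:
```python
relative_proposal_list = relative_proposal_list.cpu().numpy()
activity_scores = activity_scores.cpu().numpy()
completeness_scores = completeness_scores.cpu().numpy()
# added line: move the normalization constants to CPU like the other tensors
reg_norm_consts = reg_norm_consts.cpu().numpy()
if bbox_preds is not None:
    bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)
    bbox_preds[:, :, 0] = (
        bbox_preds[:, :, 0] * reg_norm_consts[1, 0] + reg_norm_consts[0, 0])
    bbox_preds[:, :, 1] = (
        bbox_preds[:, :, 1] * reg_norm_consts[1, 1] + reg_norm_consts[0, 1])
    bbox_preds = bbox_preds.cpu().numpy()
```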
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmaction/models/localizers/ssn.py`
Content:
```
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import torch
3 import torch.nn as nn
4
5 from .. import builder
6 from ..builder import LOCALIZERS
7 from .base import BaseTAGClassifier
8
9
10 @LOCALIZERS.register_module()
11 class SSN(BaseTAGClassifier):
12 """Temporal Action Detection with Structured Segment Networks.
13
14 Args:
15 backbone (dict): Config for building backbone.
16 cls_head (dict): Config for building classification head.
17 in_channels (int): Number of channels for input data.
18 Default: 3.
19 spatial_type (str): Type of spatial pooling.
20 Default: 'avg'.
21 dropout_ratio (float): Ratio of dropout.
22 Default: 0.5.
23 loss_cls (dict): Config for building loss.
24 Default: ``dict(type='SSNLoss')``.
25 train_cfg (dict | None): Config for training. Default: None.
26 test_cfg (dict | None): Config for testing. Default: None.
27 """
28
29 def __init__(self,
30 backbone,
31 cls_head,
32 in_channels=3,
33 spatial_type='avg',
34 dropout_ratio=0.5,
35 loss_cls=dict(type='SSNLoss'),
36 train_cfg=None,
37 test_cfg=None):
38
39 super().__init__(backbone, cls_head, train_cfg, test_cfg)
40
41 self.is_test_prepared = False
42 self.in_channels = in_channels
43
44 self.spatial_type = spatial_type
45 if self.spatial_type == 'avg':
46 self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0)
47 elif self.spatial_type == 'max':
48 self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0)
49 else:
50 self.pool = None
51
52 self.dropout_ratio = dropout_ratio
53 if self.dropout_ratio != 0:
54 self.dropout = nn.Dropout(p=self.dropout_ratio)
55 else:
56 self.dropout = None
57 self.loss_cls = builder.build_loss(loss_cls)
58
59 def forward_train(self, imgs, proposal_scale_factor, proposal_type,
60 proposal_labels, reg_targets, **kwargs):
61 """Define the computation performed at every call when training."""
62 imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:])
63
64 x = self.extract_feat(imgs)
65
66 if self.pool:
67 x = self.pool(x)
68 if self.dropout is not None:
69 x = self.dropout(x)
70
71 activity_scores, completeness_scores, bbox_preds = self.cls_head(
72 (x, proposal_scale_factor))
73
74 loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,
75 proposal_type, proposal_labels, reg_targets,
76 self.train_cfg)
77 loss_dict = dict(**loss)
78
79 return loss_dict
80
81 def forward_test(self, imgs, relative_proposal_list, scale_factor_list,
82 proposal_tick_list, reg_norm_consts, **kwargs):
83 """Define the computation performed at every call when testing."""
84 num_crops = imgs.shape[0]
85 imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:])
86 num_ticks = imgs.shape[1]
87
88 output = []
89 minibatch_size = self.test_cfg.ssn.sampler.batch_size
90 for idx in range(0, num_ticks, minibatch_size):
91 chunk = imgs[:, idx:idx +
92 minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:])
93 x = self.extract_feat(chunk)
94 if self.pool:
95 x = self.pool(x)
96 # Merge crop to save memory.
97 x = x.reshape((num_crops, x.size(0) // num_crops, -1)).mean(dim=0)
98 output.append(x)
99 output = torch.cat(output, dim=0)
100
101 relative_proposal_list = relative_proposal_list.squeeze(0)
102 proposal_tick_list = proposal_tick_list.squeeze(0)
103 scale_factor_list = scale_factor_list.squeeze(0)
104 reg_norm_consts = reg_norm_consts.squeeze(0)
105
106 if not self.is_test_prepared:
107 self.is_test_prepared = self.cls_head.prepare_test_fc(
108 self.cls_head.consensus.num_multipliers)
109
110 (output, activity_scores, completeness_scores,
111 bbox_preds) = self.cls_head(
112 (output, proposal_tick_list, scale_factor_list), test_mode=True)
113
114 relative_proposal_list = relative_proposal_list.cpu().numpy()
115 activity_scores = activity_scores.cpu().numpy()
116 completeness_scores = completeness_scores.cpu().numpy()
117 if bbox_preds is not None:
118 bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)
119 bbox_preds[:, :, 0] = (
120 bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +
121 reg_norm_consts[0, 0])
122 bbox_preds[:, :, 1] = (
123 bbox_preds[:, :, 1] * reg_norm_consts[1, 1] +
124 reg_norm_consts[0, 1])
125 bbox_preds = bbox_preds.cpu().numpy()
126
127 result = [
128 dict(
129 relative_proposal_list=relative_proposal_list,
130 activity_scores=activity_scores,
131 completeness_scores=completeness_scores,
132 bbox_preds=bbox_preds)
133 ]
134
135 return result
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmaction/models/localizers/ssn.py b/mmaction/models/localizers/ssn.py
--- a/mmaction/models/localizers/ssn.py
+++ b/mmaction/models/localizers/ssn.py
@@ -114,6 +114,7 @@
relative_proposal_list = relative_proposal_list.cpu().numpy()
activity_scores = activity_scores.cpu().numpy()
completeness_scores = completeness_scores.cpu().numpy()
+ reg_norm_consts = reg_norm_consts.cpu().numpy()
if bbox_preds is not None:
bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)
bbox_preds[:, :, 0] = (
| {"golden_diff": "diff --git a/mmaction/models/localizers/ssn.py b/mmaction/models/localizers/ssn.py\n--- a/mmaction/models/localizers/ssn.py\n+++ b/mmaction/models/localizers/ssn.py\n@@ -114,6 +114,7 @@\n relative_proposal_list = relative_proposal_list.cpu().numpy()\n activity_scores = activity_scores.cpu().numpy()\n completeness_scores = completeness_scores.cpu().numpy()\n+ reg_norm_consts = reg_norm_consts.cpu().numpy()\n if bbox_preds is not None:\n bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)\n bbox_preds[:, :, 0] = (\n", "issue": "ssn_head.py device unaligned\n**Describe the bug**\r\n\r\nI trained a SSN successfully on a new dataset. But when I test the checkpoint, an error raised. And I think it's a inner bug.\r\n\r\n\r\n**Reproduction**\r\n\r\n1. What command or script did you run?\r\n\r\n```\r\nPYTHONPATH=$PWD:$PYTHONPATH mim test mmaction configs/localization/ssn/ssn_r50_450e_dfmad_rgb_test.py --checkpoint work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth --gpus 1 --eval mAP\r\n```\r\n\r\n\r\n**Environment**\r\n```\r\nsys.platform: linux\r\nPython: 3.9.7 (default, Sep 16 2021, 13:09:58) [GCC 7.5.0]\r\nCUDA available: True\r\nGPU 0,1: NVIDIA GeForce RTX 2080 Ti\r\nCUDA_HOME: /usr/local/cuda\r\nNVCC: Build cuda_11.5.r11.5/compiler.30672275_0\r\nGCC: gcc (Ubuntu 8.4.0-3ubuntu2) 8.4.0\r\nPyTorch: 1.10.1\r\nPyTorch compiling details: PyTorch built with:\r\n - GCC 7.3\r\n - C++ Version: 201402\r\n - Intel(R) oneAPI Math Kernel Library Version 2021.4-Product Build 20210904 for Intel(R) 64 architecture applications\r\n - Intel(R) MKL-DNN v2.2.3 (Git Hash 7336ca9f055cf1bfa13efb658fe15dc9b41f0740)\r\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\r\n - LAPACK is enabled (usually provided by MKL)\r\n - NNPACK is enabled\r\n - CPU capability usage: AVX2\r\n - CUDA Runtime 11.3\r\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_37,code=compute_37\r\n - CuDNN 8.2\r\n - Magma 2.5.2\r\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.10.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, \r\n\r\nTorchVision: 0.11.2\r\nOpenCV: 4.5.4\r\nMMCV: 1.4.1\r\nMMCV Compiler: GCC 
7.3\r\nMMCV CUDA Compiler: 11.3\r\nMMAction2: 0.20.0+3758171\r\n```\r\n\r\n**Error traceback**\r\n```shell\r\nTesting command is python /home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py configs/localization/ssn/ssn_r50_450e_dfmad_rgb_test.py work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth --launcher none --eval mAP. \r\n2021-12-24 12:03:40,264 - mmaction - INFO - 13 out of 13 videos are valid.\r\n2021-12-24 12:03:40,290 - mmaction - INFO - SSNDataset: proposal file my_data/dfmad70/proposals/dfmad_bmn_test_100_proposal_list.txt parsed.\r\nload checkpoint from local path: work_dirs/ssn_r50_450e_dfmad_rgb/latest.pth\r\n[ ] 0/13, elapsed: 0s, ETA:Traceback (most recent call last):\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py\", line 365, in <module>\r\n main()\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py\", line 350, in main\r\n outputs = inference_pytorch(args, cfg, distributed, data_loader)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/.mim/tools/test.py\", line 161, in inference_pytorch\r\n outputs = single_gpu_test(model, data_loader)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmcv/engine/test.py\", line 33, in single_gpu_test\r\n result = model(return_loss=False, **data)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1102, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmcv/parallel/data_parallel.py\", line 50, in forward\r\n return super().forward(*inputs, **kwargs)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/parallel/data_parallel.py\", line 166, in forward\r\n return self.module(*inputs[0], **kwargs[0])\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1102, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/models/localizers/base.py\", line 166, in forward\r\n return self.forward_test(*args, **kwargs)\r\n File \"/home/louis/miniconda3/envs/open-mmlab/lib/python3.9/site-packages/mmaction/models/localizers/ssn.py\", line 120, in forward_test\r\n bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +\r\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\r\n```\r\n\r\n**Bug fix**\r\nthe bug come from here:\r\nhttps://github.com/open-mmlab/mmaction2/blob/6f98109b682c5689dc0548a98b89ed1c3b956ddb/mmaction/models/localizers/ssn.py#L114-L121\r\nWe can see that all tensor are transformed to ``cpu().numpy()`` except the ``reg_norm_consts``.\r\nAdding the below line solved the error:\r\n```python\r\nreg_norm_consts = reg_norm_consts.cpu().numpy()\r\n```\r\nI'd like to create a PR if need.\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\n\nfrom .. 
import builder\nfrom ..builder import LOCALIZERS\nfrom .base import BaseTAGClassifier\n\n\[email protected]_module()\nclass SSN(BaseTAGClassifier):\n \"\"\"Temporal Action Detection with Structured Segment Networks.\n\n Args:\n backbone (dict): Config for building backbone.\n cls_head (dict): Config for building classification head.\n in_channels (int): Number of channels for input data.\n Default: 3.\n spatial_type (str): Type of spatial pooling.\n Default: 'avg'.\n dropout_ratio (float): Ratio of dropout.\n Default: 0.5.\n loss_cls (dict): Config for building loss.\n Default: ``dict(type='SSNLoss')``.\n train_cfg (dict | None): Config for training. Default: None.\n test_cfg (dict | None): Config for testing. Default: None.\n \"\"\"\n\n def __init__(self,\n backbone,\n cls_head,\n in_channels=3,\n spatial_type='avg',\n dropout_ratio=0.5,\n loss_cls=dict(type='SSNLoss'),\n train_cfg=None,\n test_cfg=None):\n\n super().__init__(backbone, cls_head, train_cfg, test_cfg)\n\n self.is_test_prepared = False\n self.in_channels = in_channels\n\n self.spatial_type = spatial_type\n if self.spatial_type == 'avg':\n self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0)\n elif self.spatial_type == 'max':\n self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0)\n else:\n self.pool = None\n\n self.dropout_ratio = dropout_ratio\n if self.dropout_ratio != 0:\n self.dropout = nn.Dropout(p=self.dropout_ratio)\n else:\n self.dropout = None\n self.loss_cls = builder.build_loss(loss_cls)\n\n def forward_train(self, imgs, proposal_scale_factor, proposal_type,\n proposal_labels, reg_targets, **kwargs):\n \"\"\"Define the computation performed at every call when training.\"\"\"\n imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:])\n\n x = self.extract_feat(imgs)\n\n if self.pool:\n x = self.pool(x)\n if self.dropout is not None:\n x = self.dropout(x)\n\n activity_scores, completeness_scores, bbox_preds = self.cls_head(\n (x, proposal_scale_factor))\n\n loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,\n proposal_type, proposal_labels, reg_targets,\n self.train_cfg)\n loss_dict = dict(**loss)\n\n return loss_dict\n\n def forward_test(self, imgs, relative_proposal_list, scale_factor_list,\n proposal_tick_list, reg_norm_consts, **kwargs):\n \"\"\"Define the computation performed at every call when testing.\"\"\"\n num_crops = imgs.shape[0]\n imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:])\n num_ticks = imgs.shape[1]\n\n output = []\n minibatch_size = self.test_cfg.ssn.sampler.batch_size\n for idx in range(0, num_ticks, minibatch_size):\n chunk = imgs[:, idx:idx +\n minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:])\n x = self.extract_feat(chunk)\n if self.pool:\n x = self.pool(x)\n # Merge crop to save memory.\n x = x.reshape((num_crops, x.size(0) // num_crops, -1)).mean(dim=0)\n output.append(x)\n output = torch.cat(output, dim=0)\n\n relative_proposal_list = relative_proposal_list.squeeze(0)\n proposal_tick_list = proposal_tick_list.squeeze(0)\n scale_factor_list = scale_factor_list.squeeze(0)\n reg_norm_consts = reg_norm_consts.squeeze(0)\n\n if not self.is_test_prepared:\n self.is_test_prepared = self.cls_head.prepare_test_fc(\n self.cls_head.consensus.num_multipliers)\n\n (output, activity_scores, completeness_scores,\n bbox_preds) = self.cls_head(\n (output, proposal_tick_list, scale_factor_list), test_mode=True)\n\n relative_proposal_list = relative_proposal_list.cpu().numpy()\n activity_scores = activity_scores.cpu().numpy()\n completeness_scores = 
completeness_scores.cpu().numpy()\n if bbox_preds is not None:\n bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)\n bbox_preds[:, :, 0] = (\n bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +\n reg_norm_consts[0, 0])\n bbox_preds[:, :, 1] = (\n bbox_preds[:, :, 1] * reg_norm_consts[1, 1] +\n reg_norm_consts[0, 1])\n bbox_preds = bbox_preds.cpu().numpy()\n\n result = [\n dict(\n relative_proposal_list=relative_proposal_list,\n activity_scores=activity_scores,\n completeness_scores=completeness_scores,\n bbox_preds=bbox_preds)\n ]\n\n return result\n", "path": "mmaction/models/localizers/ssn.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\n\nfrom .. import builder\nfrom ..builder import LOCALIZERS\nfrom .base import BaseTAGClassifier\n\n\[email protected]_module()\nclass SSN(BaseTAGClassifier):\n \"\"\"Temporal Action Detection with Structured Segment Networks.\n\n Args:\n backbone (dict): Config for building backbone.\n cls_head (dict): Config for building classification head.\n in_channels (int): Number of channels for input data.\n Default: 3.\n spatial_type (str): Type of spatial pooling.\n Default: 'avg'.\n dropout_ratio (float): Ratio of dropout.\n Default: 0.5.\n loss_cls (dict): Config for building loss.\n Default: ``dict(type='SSNLoss')``.\n train_cfg (dict | None): Config for training. Default: None.\n test_cfg (dict | None): Config for testing. Default: None.\n \"\"\"\n\n def __init__(self,\n backbone,\n cls_head,\n in_channels=3,\n spatial_type='avg',\n dropout_ratio=0.5,\n loss_cls=dict(type='SSNLoss'),\n train_cfg=None,\n test_cfg=None):\n\n super().__init__(backbone, cls_head, train_cfg, test_cfg)\n\n self.is_test_prepared = False\n self.in_channels = in_channels\n\n self.spatial_type = spatial_type\n if self.spatial_type == 'avg':\n self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0)\n elif self.spatial_type == 'max':\n self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0)\n else:\n self.pool = None\n\n self.dropout_ratio = dropout_ratio\n if self.dropout_ratio != 0:\n self.dropout = nn.Dropout(p=self.dropout_ratio)\n else:\n self.dropout = None\n self.loss_cls = builder.build_loss(loss_cls)\n\n def forward_train(self, imgs, proposal_scale_factor, proposal_type,\n proposal_labels, reg_targets, **kwargs):\n \"\"\"Define the computation performed at every call when training.\"\"\"\n imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:])\n\n x = self.extract_feat(imgs)\n\n if self.pool:\n x = self.pool(x)\n if self.dropout is not None:\n x = self.dropout(x)\n\n activity_scores, completeness_scores, bbox_preds = self.cls_head(\n (x, proposal_scale_factor))\n\n loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,\n proposal_type, proposal_labels, reg_targets,\n self.train_cfg)\n loss_dict = dict(**loss)\n\n return loss_dict\n\n def forward_test(self, imgs, relative_proposal_list, scale_factor_list,\n proposal_tick_list, reg_norm_consts, **kwargs):\n \"\"\"Define the computation performed at every call when testing.\"\"\"\n num_crops = imgs.shape[0]\n imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:])\n num_ticks = imgs.shape[1]\n\n output = []\n minibatch_size = self.test_cfg.ssn.sampler.batch_size\n for idx in range(0, num_ticks, minibatch_size):\n chunk = imgs[:, idx:idx +\n minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:])\n x = self.extract_feat(chunk)\n if self.pool:\n x = self.pool(x)\n # Merge crop to save memory.\n x = x.reshape((num_crops, 
x.size(0) // num_crops, -1)).mean(dim=0)\n output.append(x)\n output = torch.cat(output, dim=0)\n\n relative_proposal_list = relative_proposal_list.squeeze(0)\n proposal_tick_list = proposal_tick_list.squeeze(0)\n scale_factor_list = scale_factor_list.squeeze(0)\n reg_norm_consts = reg_norm_consts.squeeze(0)\n\n if not self.is_test_prepared:\n self.is_test_prepared = self.cls_head.prepare_test_fc(\n self.cls_head.consensus.num_multipliers)\n\n (output, activity_scores, completeness_scores,\n bbox_preds) = self.cls_head(\n (output, proposal_tick_list, scale_factor_list), test_mode=True)\n\n relative_proposal_list = relative_proposal_list.cpu().numpy()\n activity_scores = activity_scores.cpu().numpy()\n completeness_scores = completeness_scores.cpu().numpy()\n reg_norm_consts = reg_norm_consts.cpu().numpy()\n if bbox_preds is not None:\n bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)\n bbox_preds[:, :, 0] = (\n bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +\n reg_norm_consts[0, 0])\n bbox_preds[:, :, 1] = (\n bbox_preds[:, :, 1] * reg_norm_consts[1, 1] +\n reg_norm_consts[0, 1])\n bbox_preds = bbox_preds.cpu().numpy()\n\n result = [\n dict(\n relative_proposal_list=relative_proposal_list,\n activity_scores=activity_scores,\n completeness_scores=completeness_scores,\n bbox_preds=bbox_preds)\n ]\n\n return result\n", "path": "mmaction/models/localizers/ssn.py"}]} | 3,784 | 142 |
gh_patches_debug_19185 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-881 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failing to load dialog and vocab should not be errors
Failure to load dialog files and vocab files is currently logged at the error log level. This should probably be debug or warning, since the ERROR text in the log confuses users.
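For illustration, the change could look roughly like this in ``mycroft/skills/core.py`` (the method is shown in the file listing below; only the level of the log call changes, and whether debug or warning is the better choice is left open):
```python
def init_dialog(self, root_directory):
    dialog_dir = join(root_directory, 'dialog', self.lang)
    if os.path.exists(dialog_dir):
        self.dialog_renderer = DialogLoader().load(dialog_dir)
    else:
        # a missing dialog directory is a normal, non-fatal case -> no ERROR line in the log
        logger.debug('No dialog loaded, ' + dialog_dir + ' does not exist')
```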
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/skills/core.py`
Content:
```
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import abc
20 import imp
21 import time
22
23 import os.path
24 import re
25 import time
26 from os.path import join, dirname, splitext, isdir
27
28 from adapt.intent import Intent
29
30 from mycroft.client.enclosure.api import EnclosureAPI
31 from mycroft.configuration import ConfigurationManager
32 from mycroft.dialog import DialogLoader
33 from mycroft.filesystem import FileSystemAccess
34 from mycroft.messagebus.message import Message
35 from mycroft.util.log import getLogger
36 from mycroft.skills.settings import SkillSettings
37 __author__ = 'seanfitz'
38
39 skills_config = ConfigurationManager.instance().get("skills")
40 BLACKLISTED_SKILLS = skills_config.get("blacklisted_skills", [])
41
42 SKILLS_DIR = "/opt/mycroft/skills"
43
44 MainModule = '__init__'
45
46 logger = getLogger(__name__)
47
48
49 def load_vocab_from_file(path, vocab_type, emitter):
50 if path.endswith('.voc'):
51 with open(path, 'r') as voc_file:
52 for line in voc_file.readlines():
53 parts = line.strip().split("|")
54 entity = parts[0]
55
56 emitter.emit(Message("register_vocab", {
57 'start': entity, 'end': vocab_type
58 }))
59 for alias in parts[1:]:
60 emitter.emit(Message("register_vocab", {
61 'start': alias, 'end': vocab_type, 'alias_of': entity
62 }))
63
64
65 def load_regex_from_file(path, emitter):
66 if path.endswith('.rx'):
67 with open(path, 'r') as reg_file:
68 for line in reg_file.readlines():
69 re.compile(line.strip())
70 emitter.emit(
71 Message("register_vocab", {'regex': line.strip()}))
72
73
74 def load_vocabulary(basedir, emitter):
75 for vocab_type in os.listdir(basedir):
76 if vocab_type.endswith(".voc"):
77 load_vocab_from_file(
78 join(basedir, vocab_type), splitext(vocab_type)[0], emitter)
79
80
81 def load_regex(basedir, emitter):
82 for regex_type in os.listdir(basedir):
83 if regex_type.endswith(".rx"):
84 load_regex_from_file(
85 join(basedir, regex_type), emitter)
86
87
88 def open_intent_envelope(message):
89 intent_dict = message.data
90 return Intent(intent_dict.get('name'),
91 intent_dict.get('requires'),
92 intent_dict.get('at_least_one'),
93 intent_dict.get('optional'))
94
95
96 def load_skill(skill_descriptor, emitter):
97 try:
98 logger.info("ATTEMPTING TO LOAD SKILL: " + skill_descriptor["name"])
99 if skill_descriptor['name'] in BLACKLISTED_SKILLS:
100 logger.info("SKILL IS BLACKLISTED " + skill_descriptor["name"])
101 return None
102 skill_module = imp.load_module(
103 skill_descriptor["name"] + MainModule, *skill_descriptor["info"])
104 if (hasattr(skill_module, 'create_skill') and
105 callable(skill_module.create_skill)):
106 # v2 skills framework
107 skill = skill_module.create_skill()
108 skill.bind(emitter)
109 skill._dir = dirname(skill_descriptor['info'][1])
110 skill.load_data_files(dirname(skill_descriptor['info'][1]))
111 skill.initialize()
112 logger.info("Loaded " + skill_descriptor["name"])
113 return skill
114 else:
115 logger.warn(
116 "Module %s does not appear to be skill" % (
117 skill_descriptor["name"]))
118 except:
119 logger.error(
120 "Failed to load skill: " + skill_descriptor["name"], exc_info=True)
121 return None
122
123
124 def get_skills(skills_folder):
125 logger.info("LOADING SKILLS FROM " + skills_folder)
126 skills = []
127 possible_skills = os.listdir(skills_folder)
128 for i in possible_skills:
129 location = join(skills_folder, i)
130 if (isdir(location) and
131 not MainModule + ".py" in os.listdir(location)):
132 for j in os.listdir(location):
133 name = join(location, j)
134 if (not isdir(name) or
135 not MainModule + ".py" in os.listdir(name)):
136 continue
137 skills.append(create_skill_descriptor(name))
138 if (not isdir(location) or
139 not MainModule + ".py" in os.listdir(location)):
140 continue
141
142 skills.append(create_skill_descriptor(location))
143 skills = sorted(skills, key=lambda p: p.get('name'))
144 return skills
145
146
147 def create_skill_descriptor(skill_folder):
148 info = imp.find_module(MainModule, [skill_folder])
149 return {"name": os.path.basename(skill_folder), "info": info}
150
151
152 def load_skills(emitter, skills_root=SKILLS_DIR):
153 logger.info("Checking " + skills_root + " for new skills")
154 skill_list = []
155 for skill in get_skills(skills_root):
156 skill_list.append(load_skill(skill, emitter))
157
158 return skill_list
159
160
161 def unload_skills(skills):
162 for s in skills:
163 s.shutdown()
164
165
166 class MycroftSkill(object):
167 """
168 Abstract base class which provides common behaviour and parameters to all
169 Skills implementation.
170 """
171
172 def __init__(self, name, emitter=None):
173 self.name = name
174 self.bind(emitter)
175 self.config_core = ConfigurationManager.get()
176 self.config = self.config_core.get(name)
177 self.dialog_renderer = None
178 self.file_system = FileSystemAccess(join('skills', name))
179 self.registered_intents = []
180 self.log = getLogger(name)
181 self.reload_skill = True
182 self.events = []
183
184 @property
185 def location(self):
186 """ Get the JSON data struction holding location information. """
187 # TODO: Allow Enclosure to override this for devices that
188 # contain a GPS.
189 return self.config_core.get('location')
190
191 @property
192 def location_pretty(self):
193 """ Get a more 'human' version of the location as a string. """
194 loc = self.location
195 if type(loc) is dict and loc["city"]:
196 return loc["city"]["name"]
197 return None
198
199 @property
200 def location_timezone(self):
201 """ Get the timezone code, such as 'America/Los_Angeles' """
202 loc = self.location
203 if type(loc) is dict and loc["timezone"]:
204 return loc["timezone"]["code"]
205 return None
206
207 @property
208 def lang(self):
209 return self.config_core.get('lang')
210
211 @property
212 def settings(self):
213 """ Load settings if not already loaded. """
214 try:
215 return self._settings
216 except:
217 self._settings = SkillSettings(join(self._dir, 'settings.json'))
218 return self._settings
219
220 def bind(self, emitter):
221 if emitter:
222 self.emitter = emitter
223 self.enclosure = EnclosureAPI(emitter)
224 self.__register_stop()
225
226 def __register_stop(self):
227 self.stop_time = time.time()
228 self.stop_threshold = self.config_core.get("skills").get(
229 'stop_threshold')
230 self.emitter.on('mycroft.stop', self.__handle_stop)
231
232 def detach(self):
233 for (name, intent) in self.registered_intents:
234 name = self.name + ':' + name
235 self.emitter.emit(Message("detach_intent", {"intent_name": name}))
236
237 def initialize(self):
238 """
239 Initialization function to be implemented by all Skills.
240
241 Usually used to create intents rules and register them.
242 """
243 raise Exception("Initialize not implemented for skill: " + self.name)
244
245 def register_intent(self, intent_parser, handler):
246 name = intent_parser.name
247 intent_parser.name = self.name + ':' + intent_parser.name
248 self.emitter.emit(Message("register_intent", intent_parser.__dict__))
249 self.registered_intents.append((name, intent_parser))
250
251 def receive_handler(message):
252 try:
253 handler(message)
254 except:
255 # TODO: Localize
256 self.speak(
257 "An error occurred while processing a request in " +
258 self.name)
259 logger.error(
260 "An error occurred while processing a request in " +
261 self.name, exc_info=True)
262
263 if handler:
264 self.emitter.on(intent_parser.name, receive_handler)
265 self.events.append((intent_parser.name, receive_handler))
266
267 def disable_intent(self, intent_name):
268 """Disable a registered intent"""
269 logger.debug('Disabling intent ' + intent_name)
270 name = self.name + ':' + intent_name
271 self.emitter.emit(Message("detach_intent", {"intent_name": name}))
272
273 def enable_intent(self, intent_name):
274 """Reenable a registered intent"""
275 for (name, intent) in self.registered_intents:
276 if name == intent_name:
277 self.registered_intents.remove((name, intent))
278 intent.name = name
279 self.register_intent(intent, None)
280 logger.debug('Enabling intent ' + intent_name)
281 break
282 else:
283 logger.error('Could not enable ' + intent_name +
284 ', it hasn\'t been registered.')
285
286 def register_vocabulary(self, entity, entity_type):
287 self.emitter.emit(Message('register_vocab', {
288 'start': entity, 'end': entity_type
289 }))
290
291 def register_regex(self, regex_str):
292 re.compile(regex_str) # validate regex
293 self.emitter.emit(Message('register_vocab', {'regex': regex_str}))
294
295 def speak(self, utterance, expect_response=False):
296 data = {'utterance': utterance,
297 'expect_response': expect_response}
298 self.emitter.emit(Message("speak", data))
299
300 def speak_dialog(self, key, data={}, expect_response=False):
301 data['expect_response'] = expect_response
302 self.speak(self.dialog_renderer.render(key, data))
303
304 def init_dialog(self, root_directory):
305 dialog_dir = join(root_directory, 'dialog', self.lang)
306 if os.path.exists(dialog_dir):
307 self.dialog_renderer = DialogLoader().load(dialog_dir)
308 else:
309 logger.error('No dialog loaded, ' + dialog_dir + ' does not exist')
310
311 def load_data_files(self, root_directory):
312 self.init_dialog(root_directory)
313 self.load_vocab_files(join(root_directory, 'vocab', self.lang))
314 regex_path = join(root_directory, 'regex', self.lang)
315 if os.path.exists(regex_path):
316 self.load_regex_files(regex_path)
317
318 def load_vocab_files(self, vocab_dir):
319 if os.path.exists(vocab_dir):
320 load_vocabulary(vocab_dir, self.emitter)
321 else:
322 logger.error('No vocab loaded, ' + vocab_dir + ' does not exist')
323
324 def load_regex_files(self, regex_dir):
325 load_regex(regex_dir, self.emitter)
326
327 def __handle_stop(self, event):
328 self.stop_time = time.time()
329 self.stop()
330
331 @abc.abstractmethod
332 def stop(self):
333 pass
334
335 def is_stop(self):
336 passed_time = time.time() - self.stop_time
337 return passed_time < self.stop_threshold
338
339 def shutdown(self):
340 """
341 This method is intended to be called during the skill
342 process termination. The skill implementation must
343 shutdown all processes and operations in execution.
344 """
345 # Store settings
346 self.settings.store()
347
348 # removing events
349 for e, f in self.events:
350 self.emitter.remove(e, f)
351
352 self.emitter.emit(
353 Message("detach_skill", {"skill_name": self.name + ":"}))
354 self.stop()
355
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mycroft/skills/core.py b/mycroft/skills/core.py
--- a/mycroft/skills/core.py
+++ b/mycroft/skills/core.py
@@ -306,7 +306,7 @@
if os.path.exists(dialog_dir):
self.dialog_renderer = DialogLoader().load(dialog_dir)
else:
- logger.error('No dialog loaded, ' + dialog_dir + ' does not exist')
+ logger.debug('No dialog loaded, ' + dialog_dir + ' does not exist')
def load_data_files(self, root_directory):
self.init_dialog(root_directory)
@@ -319,7 +319,7 @@
if os.path.exists(vocab_dir):
load_vocabulary(vocab_dir, self.emitter)
else:
- logger.error('No vocab loaded, ' + vocab_dir + ' does not exist')
+ logger.debug('No vocab loaded, ' + vocab_dir + ' does not exist')
def load_regex_files(self, regex_dir):
load_regex(regex_dir, self.emitter)
| {"golden_diff": "diff --git a/mycroft/skills/core.py b/mycroft/skills/core.py\n--- a/mycroft/skills/core.py\n+++ b/mycroft/skills/core.py\n@@ -306,7 +306,7 @@\n if os.path.exists(dialog_dir):\n self.dialog_renderer = DialogLoader().load(dialog_dir)\n else:\n- logger.error('No dialog loaded, ' + dialog_dir + ' does not exist')\n+ logger.debug('No dialog loaded, ' + dialog_dir + ' does not exist')\n \n def load_data_files(self, root_directory):\n self.init_dialog(root_directory)\n@@ -319,7 +319,7 @@\n if os.path.exists(vocab_dir):\n load_vocabulary(vocab_dir, self.emitter)\n else:\n- logger.error('No vocab loaded, ' + vocab_dir + ' does not exist')\n+ logger.debug('No vocab loaded, ' + vocab_dir + ' does not exist')\n \n def load_regex_files(self, regex_dir):\n load_regex(regex_dir, self.emitter)\n", "issue": "Failing to load dialog and vocab should not be errors\nFailure to load dialog files and vocab files are currently logged using the error log level. this should probably be debug or warnings since the ERROR text in the log confuses users.\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport abc\nimport imp\nimport time\n\nimport os.path\nimport re\nimport time\nfrom os.path import join, dirname, splitext, isdir\n\nfrom adapt.intent import Intent\n\nfrom mycroft.client.enclosure.api import EnclosureAPI\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.dialog import DialogLoader\nfrom mycroft.filesystem import FileSystemAccess\nfrom mycroft.messagebus.message import Message\nfrom mycroft.util.log import getLogger\nfrom mycroft.skills.settings import SkillSettings\n__author__ = 'seanfitz'\n\nskills_config = ConfigurationManager.instance().get(\"skills\")\nBLACKLISTED_SKILLS = skills_config.get(\"blacklisted_skills\", [])\n\nSKILLS_DIR = \"/opt/mycroft/skills\"\n\nMainModule = '__init__'\n\nlogger = getLogger(__name__)\n\n\ndef load_vocab_from_file(path, vocab_type, emitter):\n if path.endswith('.voc'):\n with open(path, 'r') as voc_file:\n for line in voc_file.readlines():\n parts = line.strip().split(\"|\")\n entity = parts[0]\n\n emitter.emit(Message(\"register_vocab\", {\n 'start': entity, 'end': vocab_type\n }))\n for alias in parts[1:]:\n emitter.emit(Message(\"register_vocab\", {\n 'start': alias, 'end': vocab_type, 'alias_of': entity\n }))\n\n\ndef load_regex_from_file(path, emitter):\n if path.endswith('.rx'):\n with open(path, 'r') as reg_file:\n for line in reg_file.readlines():\n re.compile(line.strip())\n emitter.emit(\n Message(\"register_vocab\", {'regex': line.strip()}))\n\n\ndef load_vocabulary(basedir, emitter):\n for vocab_type in os.listdir(basedir):\n if vocab_type.endswith(\".voc\"):\n load_vocab_from_file(\n join(basedir, vocab_type), splitext(vocab_type)[0], emitter)\n\n\ndef load_regex(basedir, emitter):\n for regex_type in 
os.listdir(basedir):\n if regex_type.endswith(\".rx\"):\n load_regex_from_file(\n join(basedir, regex_type), emitter)\n\n\ndef open_intent_envelope(message):\n intent_dict = message.data\n return Intent(intent_dict.get('name'),\n intent_dict.get('requires'),\n intent_dict.get('at_least_one'),\n intent_dict.get('optional'))\n\n\ndef load_skill(skill_descriptor, emitter):\n try:\n logger.info(\"ATTEMPTING TO LOAD SKILL: \" + skill_descriptor[\"name\"])\n if skill_descriptor['name'] in BLACKLISTED_SKILLS:\n logger.info(\"SKILL IS BLACKLISTED \" + skill_descriptor[\"name\"])\n return None\n skill_module = imp.load_module(\n skill_descriptor[\"name\"] + MainModule, *skill_descriptor[\"info\"])\n if (hasattr(skill_module, 'create_skill') and\n callable(skill_module.create_skill)):\n # v2 skills framework\n skill = skill_module.create_skill()\n skill.bind(emitter)\n skill._dir = dirname(skill_descriptor['info'][1])\n skill.load_data_files(dirname(skill_descriptor['info'][1]))\n skill.initialize()\n logger.info(\"Loaded \" + skill_descriptor[\"name\"])\n return skill\n else:\n logger.warn(\n \"Module %s does not appear to be skill\" % (\n skill_descriptor[\"name\"]))\n except:\n logger.error(\n \"Failed to load skill: \" + skill_descriptor[\"name\"], exc_info=True)\n return None\n\n\ndef get_skills(skills_folder):\n logger.info(\"LOADING SKILLS FROM \" + skills_folder)\n skills = []\n possible_skills = os.listdir(skills_folder)\n for i in possible_skills:\n location = join(skills_folder, i)\n if (isdir(location) and\n not MainModule + \".py\" in os.listdir(location)):\n for j in os.listdir(location):\n name = join(location, j)\n if (not isdir(name) or\n not MainModule + \".py\" in os.listdir(name)):\n continue\n skills.append(create_skill_descriptor(name))\n if (not isdir(location) or\n not MainModule + \".py\" in os.listdir(location)):\n continue\n\n skills.append(create_skill_descriptor(location))\n skills = sorted(skills, key=lambda p: p.get('name'))\n return skills\n\n\ndef create_skill_descriptor(skill_folder):\n info = imp.find_module(MainModule, [skill_folder])\n return {\"name\": os.path.basename(skill_folder), \"info\": info}\n\n\ndef load_skills(emitter, skills_root=SKILLS_DIR):\n logger.info(\"Checking \" + skills_root + \" for new skills\")\n skill_list = []\n for skill in get_skills(skills_root):\n skill_list.append(load_skill(skill, emitter))\n\n return skill_list\n\n\ndef unload_skills(skills):\n for s in skills:\n s.shutdown()\n\n\nclass MycroftSkill(object):\n \"\"\"\n Abstract base class which provides common behaviour and parameters to all\n Skills implementation.\n \"\"\"\n\n def __init__(self, name, emitter=None):\n self.name = name\n self.bind(emitter)\n self.config_core = ConfigurationManager.get()\n self.config = self.config_core.get(name)\n self.dialog_renderer = None\n self.file_system = FileSystemAccess(join('skills', name))\n self.registered_intents = []\n self.log = getLogger(name)\n self.reload_skill = True\n self.events = []\n\n @property\n def location(self):\n \"\"\" Get the JSON data struction holding location information. \"\"\"\n # TODO: Allow Enclosure to override this for devices that\n # contain a GPS.\n return self.config_core.get('location')\n\n @property\n def location_pretty(self):\n \"\"\" Get a more 'human' version of the location as a string. 
\"\"\"\n loc = self.location\n if type(loc) is dict and loc[\"city\"]:\n return loc[\"city\"][\"name\"]\n return None\n\n @property\n def location_timezone(self):\n \"\"\" Get the timezone code, such as 'America/Los_Angeles' \"\"\"\n loc = self.location\n if type(loc) is dict and loc[\"timezone\"]:\n return loc[\"timezone\"][\"code\"]\n return None\n\n @property\n def lang(self):\n return self.config_core.get('lang')\n\n @property\n def settings(self):\n \"\"\" Load settings if not already loaded. \"\"\"\n try:\n return self._settings\n except:\n self._settings = SkillSettings(join(self._dir, 'settings.json'))\n return self._settings\n\n def bind(self, emitter):\n if emitter:\n self.emitter = emitter\n self.enclosure = EnclosureAPI(emitter)\n self.__register_stop()\n\n def __register_stop(self):\n self.stop_time = time.time()\n self.stop_threshold = self.config_core.get(\"skills\").get(\n 'stop_threshold')\n self.emitter.on('mycroft.stop', self.__handle_stop)\n\n def detach(self):\n for (name, intent) in self.registered_intents:\n name = self.name + ':' + name\n self.emitter.emit(Message(\"detach_intent\", {\"intent_name\": name}))\n\n def initialize(self):\n \"\"\"\n Initialization function to be implemented by all Skills.\n\n Usually used to create intents rules and register them.\n \"\"\"\n raise Exception(\"Initialize not implemented for skill: \" + self.name)\n\n def register_intent(self, intent_parser, handler):\n name = intent_parser.name\n intent_parser.name = self.name + ':' + intent_parser.name\n self.emitter.emit(Message(\"register_intent\", intent_parser.__dict__))\n self.registered_intents.append((name, intent_parser))\n\n def receive_handler(message):\n try:\n handler(message)\n except:\n # TODO: Localize\n self.speak(\n \"An error occurred while processing a request in \" +\n self.name)\n logger.error(\n \"An error occurred while processing a request in \" +\n self.name, exc_info=True)\n\n if handler:\n self.emitter.on(intent_parser.name, receive_handler)\n self.events.append((intent_parser.name, receive_handler))\n\n def disable_intent(self, intent_name):\n \"\"\"Disable a registered intent\"\"\"\n logger.debug('Disabling intent ' + intent_name)\n name = self.name + ':' + intent_name\n self.emitter.emit(Message(\"detach_intent\", {\"intent_name\": name}))\n\n def enable_intent(self, intent_name):\n \"\"\"Reenable a registered intent\"\"\"\n for (name, intent) in self.registered_intents:\n if name == intent_name:\n self.registered_intents.remove((name, intent))\n intent.name = name\n self.register_intent(intent, None)\n logger.debug('Enabling intent ' + intent_name)\n break\n else:\n logger.error('Could not enable ' + intent_name +\n ', it hasn\\'t been registered.')\n\n def register_vocabulary(self, entity, entity_type):\n self.emitter.emit(Message('register_vocab', {\n 'start': entity, 'end': entity_type\n }))\n\n def register_regex(self, regex_str):\n re.compile(regex_str) # validate regex\n self.emitter.emit(Message('register_vocab', {'regex': regex_str}))\n\n def speak(self, utterance, expect_response=False):\n data = {'utterance': utterance,\n 'expect_response': expect_response}\n self.emitter.emit(Message(\"speak\", data))\n\n def speak_dialog(self, key, data={}, expect_response=False):\n data['expect_response'] = expect_response\n self.speak(self.dialog_renderer.render(key, data))\n\n def init_dialog(self, root_directory):\n dialog_dir = join(root_directory, 'dialog', self.lang)\n if os.path.exists(dialog_dir):\n self.dialog_renderer = 
DialogLoader().load(dialog_dir)\n else:\n logger.error('No dialog loaded, ' + dialog_dir + ' does not exist')\n\n def load_data_files(self, root_directory):\n self.init_dialog(root_directory)\n self.load_vocab_files(join(root_directory, 'vocab', self.lang))\n regex_path = join(root_directory, 'regex', self.lang)\n if os.path.exists(regex_path):\n self.load_regex_files(regex_path)\n\n def load_vocab_files(self, vocab_dir):\n if os.path.exists(vocab_dir):\n load_vocabulary(vocab_dir, self.emitter)\n else:\n logger.error('No vocab loaded, ' + vocab_dir + ' does not exist')\n\n def load_regex_files(self, regex_dir):\n load_regex(regex_dir, self.emitter)\n\n def __handle_stop(self, event):\n self.stop_time = time.time()\n self.stop()\n\n @abc.abstractmethod\n def stop(self):\n pass\n\n def is_stop(self):\n passed_time = time.time() - self.stop_time\n return passed_time < self.stop_threshold\n\n def shutdown(self):\n \"\"\"\n This method is intended to be called during the skill\n process termination. The skill implementation must\n shutdown all processes and operations in execution.\n \"\"\"\n # Store settings\n self.settings.store()\n\n # removing events\n for e, f in self.events:\n self.emitter.remove(e, f)\n\n self.emitter.emit(\n Message(\"detach_skill\", {\"skill_name\": self.name + \":\"}))\n self.stop()\n", "path": "mycroft/skills/core.py"}], "after_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nimport abc\nimport imp\nimport time\n\nimport os.path\nimport re\nimport time\nfrom os.path import join, dirname, splitext, isdir\n\nfrom adapt.intent import Intent\n\nfrom mycroft.client.enclosure.api import EnclosureAPI\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.dialog import DialogLoader\nfrom mycroft.filesystem import FileSystemAccess\nfrom mycroft.messagebus.message import Message\nfrom mycroft.util.log import getLogger\nfrom mycroft.skills.settings import SkillSettings\n__author__ = 'seanfitz'\n\nskills_config = ConfigurationManager.instance().get(\"skills\")\nBLACKLISTED_SKILLS = skills_config.get(\"blacklisted_skills\", [])\n\nSKILLS_DIR = \"/opt/mycroft/skills\"\n\nMainModule = '__init__'\n\nlogger = getLogger(__name__)\n\n\ndef load_vocab_from_file(path, vocab_type, emitter):\n if path.endswith('.voc'):\n with open(path, 'r') as voc_file:\n for line in voc_file.readlines():\n parts = line.strip().split(\"|\")\n entity = parts[0]\n\n emitter.emit(Message(\"register_vocab\", {\n 'start': entity, 'end': vocab_type\n }))\n for alias in parts[1:]:\n emitter.emit(Message(\"register_vocab\", {\n 'start': alias, 'end': vocab_type, 'alias_of': entity\n }))\n\n\ndef load_regex_from_file(path, emitter):\n if path.endswith('.rx'):\n with open(path, 'r') as reg_file:\n for line in reg_file.readlines():\n re.compile(line.strip())\n emitter.emit(\n Message(\"register_vocab\", {'regex': line.strip()}))\n\n\ndef load_vocabulary(basedir, emitter):\n for vocab_type in os.listdir(basedir):\n if vocab_type.endswith(\".voc\"):\n load_vocab_from_file(\n join(basedir, vocab_type), splitext(vocab_type)[0], emitter)\n\n\ndef load_regex(basedir, emitter):\n for regex_type in os.listdir(basedir):\n if regex_type.endswith(\".rx\"):\n load_regex_from_file(\n join(basedir, regex_type), emitter)\n\n\ndef open_intent_envelope(message):\n intent_dict = message.data\n return Intent(intent_dict.get('name'),\n intent_dict.get('requires'),\n intent_dict.get('at_least_one'),\n intent_dict.get('optional'))\n\n\ndef load_skill(skill_descriptor, emitter):\n try:\n logger.info(\"ATTEMPTING TO LOAD SKILL: \" + skill_descriptor[\"name\"])\n if skill_descriptor['name'] in BLACKLISTED_SKILLS:\n logger.info(\"SKILL IS BLACKLISTED \" + skill_descriptor[\"name\"])\n return None\n skill_module = imp.load_module(\n skill_descriptor[\"name\"] + MainModule, *skill_descriptor[\"info\"])\n if (hasattr(skill_module, 'create_skill') and\n callable(skill_module.create_skill)):\n # v2 skills framework\n skill = skill_module.create_skill()\n skill.bind(emitter)\n skill._dir = dirname(skill_descriptor['info'][1])\n skill.load_data_files(dirname(skill_descriptor['info'][1]))\n skill.initialize()\n logger.info(\"Loaded \" + skill_descriptor[\"name\"])\n return skill\n else:\n logger.warn(\n \"Module %s does not appear to be skill\" % (\n skill_descriptor[\"name\"]))\n except:\n logger.error(\n \"Failed to load skill: \" + skill_descriptor[\"name\"], exc_info=True)\n return None\n\n\ndef get_skills(skills_folder):\n logger.info(\"LOADING SKILLS FROM \" + skills_folder)\n skills = []\n possible_skills = os.listdir(skills_folder)\n for i in possible_skills:\n location = join(skills_folder, i)\n if (isdir(location) and\n not MainModule + \".py\" in os.listdir(location)):\n for j in os.listdir(location):\n name = join(location, j)\n if (not isdir(name) or\n not MainModule + \".py\" in os.listdir(name)):\n continue\n skills.append(create_skill_descriptor(name))\n if (not 
isdir(location) or\n not MainModule + \".py\" in os.listdir(location)):\n continue\n\n skills.append(create_skill_descriptor(location))\n skills = sorted(skills, key=lambda p: p.get('name'))\n return skills\n\n\ndef create_skill_descriptor(skill_folder):\n info = imp.find_module(MainModule, [skill_folder])\n return {\"name\": os.path.basename(skill_folder), \"info\": info}\n\n\ndef load_skills(emitter, skills_root=SKILLS_DIR):\n logger.info(\"Checking \" + skills_root + \" for new skills\")\n skill_list = []\n for skill in get_skills(skills_root):\n skill_list.append(load_skill(skill, emitter))\n\n return skill_list\n\n\ndef unload_skills(skills):\n for s in skills:\n s.shutdown()\n\n\nclass MycroftSkill(object):\n \"\"\"\n Abstract base class which provides common behaviour and parameters to all\n Skills implementation.\n \"\"\"\n\n def __init__(self, name, emitter=None):\n self.name = name\n self.bind(emitter)\n self.config_core = ConfigurationManager.get()\n self.config = self.config_core.get(name)\n self.dialog_renderer = None\n self.file_system = FileSystemAccess(join('skills', name))\n self.registered_intents = []\n self.log = getLogger(name)\n self.reload_skill = True\n self.events = []\n\n @property\n def location(self):\n \"\"\" Get the JSON data struction holding location information. \"\"\"\n # TODO: Allow Enclosure to override this for devices that\n # contain a GPS.\n return self.config_core.get('location')\n\n @property\n def location_pretty(self):\n \"\"\" Get a more 'human' version of the location as a string. \"\"\"\n loc = self.location\n if type(loc) is dict and loc[\"city\"]:\n return loc[\"city\"][\"name\"]\n return None\n\n @property\n def location_timezone(self):\n \"\"\" Get the timezone code, such as 'America/Los_Angeles' \"\"\"\n loc = self.location\n if type(loc) is dict and loc[\"timezone\"]:\n return loc[\"timezone\"][\"code\"]\n return None\n\n @property\n def lang(self):\n return self.config_core.get('lang')\n\n @property\n def settings(self):\n \"\"\" Load settings if not already loaded. 
\"\"\"\n try:\n return self._settings\n except:\n self._settings = SkillSettings(join(self._dir, 'settings.json'))\n return self._settings\n\n def bind(self, emitter):\n if emitter:\n self.emitter = emitter\n self.enclosure = EnclosureAPI(emitter)\n self.__register_stop()\n\n def __register_stop(self):\n self.stop_time = time.time()\n self.stop_threshold = self.config_core.get(\"skills\").get(\n 'stop_threshold')\n self.emitter.on('mycroft.stop', self.__handle_stop)\n\n def detach(self):\n for (name, intent) in self.registered_intents:\n name = self.name + ':' + name\n self.emitter.emit(Message(\"detach_intent\", {\"intent_name\": name}))\n\n def initialize(self):\n \"\"\"\n Initialization function to be implemented by all Skills.\n\n Usually used to create intents rules and register them.\n \"\"\"\n raise Exception(\"Initialize not implemented for skill: \" + self.name)\n\n def register_intent(self, intent_parser, handler):\n name = intent_parser.name\n intent_parser.name = self.name + ':' + intent_parser.name\n self.emitter.emit(Message(\"register_intent\", intent_parser.__dict__))\n self.registered_intents.append((name, intent_parser))\n\n def receive_handler(message):\n try:\n handler(message)\n except:\n # TODO: Localize\n self.speak(\n \"An error occurred while processing a request in \" +\n self.name)\n logger.error(\n \"An error occurred while processing a request in \" +\n self.name, exc_info=True)\n\n if handler:\n self.emitter.on(intent_parser.name, receive_handler)\n self.events.append((intent_parser.name, receive_handler))\n\n def disable_intent(self, intent_name):\n \"\"\"Disable a registered intent\"\"\"\n logger.debug('Disabling intent ' + intent_name)\n name = self.name + ':' + intent_name\n self.emitter.emit(Message(\"detach_intent\", {\"intent_name\": name}))\n\n def enable_intent(self, intent_name):\n \"\"\"Reenable a registered intent\"\"\"\n for (name, intent) in self.registered_intents:\n if name == intent_name:\n self.registered_intents.remove((name, intent))\n intent.name = name\n self.register_intent(intent, None)\n logger.debug('Enabling intent ' + intent_name)\n break\n else:\n logger.error('Could not enable ' + intent_name +\n ', it hasn\\'t been registered.')\n\n def register_vocabulary(self, entity, entity_type):\n self.emitter.emit(Message('register_vocab', {\n 'start': entity, 'end': entity_type\n }))\n\n def register_regex(self, regex_str):\n re.compile(regex_str) # validate regex\n self.emitter.emit(Message('register_vocab', {'regex': regex_str}))\n\n def speak(self, utterance, expect_response=False):\n data = {'utterance': utterance,\n 'expect_response': expect_response}\n self.emitter.emit(Message(\"speak\", data))\n\n def speak_dialog(self, key, data={}, expect_response=False):\n data['expect_response'] = expect_response\n self.speak(self.dialog_renderer.render(key, data))\n\n def init_dialog(self, root_directory):\n dialog_dir = join(root_directory, 'dialog', self.lang)\n if os.path.exists(dialog_dir):\n self.dialog_renderer = DialogLoader().load(dialog_dir)\n else:\n logger.debug('No dialog loaded, ' + dialog_dir + ' does not exist')\n\n def load_data_files(self, root_directory):\n self.init_dialog(root_directory)\n self.load_vocab_files(join(root_directory, 'vocab', self.lang))\n regex_path = join(root_directory, 'regex', self.lang)\n if os.path.exists(regex_path):\n self.load_regex_files(regex_path)\n\n def load_vocab_files(self, vocab_dir):\n if os.path.exists(vocab_dir):\n load_vocabulary(vocab_dir, self.emitter)\n else:\n logger.debug('No vocab 
loaded, ' + vocab_dir + ' does not exist')\n\n def load_regex_files(self, regex_dir):\n load_regex(regex_dir, self.emitter)\n\n def __handle_stop(self, event):\n self.stop_time = time.time()\n self.stop()\n\n @abc.abstractmethod\n def stop(self):\n pass\n\n def is_stop(self):\n passed_time = time.time() - self.stop_time\n return passed_time < self.stop_threshold\n\n def shutdown(self):\n \"\"\"\n This method is intended to be called during the skill\n process termination. The skill implementation must\n shutdown all processes and operations in execution.\n \"\"\"\n # Store settings\n self.settings.store()\n\n # removing events\n for e, f in self.events:\n self.emitter.remove(e, f)\n\n self.emitter.emit(\n Message(\"detach_skill\", {\"skill_name\": self.name + \":\"}))\n self.stop()\n", "path": "mycroft/skills/core.py"}]} | 3,849 | 226 |
gh_patches_debug_39376 | rasdani/github-patches | git_diff | google__TensorNetwork-149 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ncon throws an obscure error for a valid contraction order
Hey, I just noticed that the following code throws an obscure error:
```python
d = 2
D1 = 4
D2 = 5
a = tf.random_uniform(shape = [D1,D2,d,d])
b = tf.random_uniform(shape = [D1,d,D1])
c = tf.random_uniform(shape = [D2,d,D2])
f = tf.random_uniform(shape = [d,d,d,d])
tn.ncon([a, b, c, f], [[1,3,5,4], [1,2,-1], [3,4,-2], [5,-4,2,-3]])
```
The order above is not chosen smartly, but `ncon` should either throw an interpretable error or do the contraction.
--- END ISSUE ---
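For reference, the network in the report is well formed; only the ordering of the positive labels is awkward. A minimal NumPy cross-check (a sketch added here for clarity, assuming the usual ncon convention that positive labels are summed over and the negative labels -1, -2, -3, -4 give the output legs in descending order, as the docstring in the file below states) expresses the same contraction as a single `einsum`:

```python
import numpy as np

d, D1, D2 = 2, 4, 5
a = np.random.rand(D1, D2, d, d)  # labels (1, 3, 5, 4)
b = np.random.rand(D1, d, D1)     # labels (1, 2, -1)
c = np.random.rand(D2, d, D2)     # labels (3, 4, -2)
f = np.random.rand(d, d, d, d)    # labels (5, -4, 2, -3)

# letter map: 1->p, 2->q, 3->r, 4->s, 5->t and -1->w, -2->x, -3->y, -4->z
out = np.einsum("prts,pqw,rsx,tzqy->wxyz", a, b, c, f)
print(out.shape)  # (4, 5, 2, 2): every shared label has matching dimensions
```

Since the labels line up dimensionally, the failure has to come from how `ncon` walks the contraction order, not from the network itself.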
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensornetwork/ncon_interface.py`
Content:
```
1 # Copyright 2019 The TensorNetwork Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """NCON interface to TensorNetwork."""
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19 from typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict, Any
20 from tensornetwork import network
21 from tensornetwork import network_components
22
23 Tensor = Any
24
25
26 def ncon(tensors: Sequence[Tensor],
27 network_structure: Sequence[Sequence],
28 con_order: Optional[Sequence] = None,
29 out_order: Optional[Sequence] = None) -> Tensor:
30 r"""Contracts a list of tensors according to a tensor network specification.
31
32 The network is provided as a list of lists, one for each
33 tensor, specifying labels for the edges connected to that tensor.
34
35 If a contraction order `con_order` and an output order `out_order`
36 are both provided, the edge labels can be anything.
37 Otherwise (`con_order == None or out_order == None`), the edge labels
38 must be nonzero integers and edges will be contracted in ascending order.
39 Negative integers denote the (dangling) indices of the output tensor,
40 which will be in descending order, e.g. [-1,-2,-3,...].
41
42 For example, matrix multiplication:
43
44 ```python
45 A = np.array([[1.0, 2.0], [3.0, 4.0]])
46 B = np.array([[1.0, 1.0], [0.0, 1.0]])
47 ncon([A,B], [(-1, 1), (1, -2)])
48 ```
49
50 Matrix trace:
51
52 ```python
53 A = np.array([[1.0, 2.0], [3.0, 4.0]])
54 ncon([A], [(1, 1)]) # 5.0
55 ```
56
57 Note: The reason `0` is not allowed as an edge label without manually
58 specifying the contraction order is to maintain compatibility with the
59 [original NCON implementation](https://arxiv.org/abs/1402.0939). However,
60 the use of `0` in `con_order` to denote outer products is not (currently)
61 supported in this implementation.
62
63 Args:
64 tensors: List of `Tensor`s.
65 network_structure: List of lists specifying the tensor network
66 structure.
67 con_order: List of edge labels specifying the contraction order.
68 out_order: List of edge labels specifying the output order.
69
70 Returns:
71 A `Tensor` resulting from the contraction of the tensor network.
72 """
73 tn, con_edges, out_edges = ncon_network(
74 tensors, network_structure, con_order=con_order, out_order=out_order)
75
76 # Contract assuming all edges connecting a given pair of nodes are adjacent
77 # in con_order. If this is not the case, the contraction is sub-optimal
78 # so we throw an exception.
79 prev_nodes = []
80 while con_edges:
81 e = con_edges.pop(0) # pop so that older nodes can be deallocated
82 nodes = e.get_nodes()
83
84 nodes_set = set(nodes)
85 if nodes_set != set(prev_nodes):
86 if not nodes_set.issubset(tn.nodes_set):
87 # the node pair was already contracted
88 raise ValueError("Edge '{}' is not adjacent to other edges connecting "
89 "'{}' and '{}' in the contraction order.".format(
90 e, nodes[0], nodes[1]))
91 if not con_edges and len(tn.nodes_set) == 2:
92 # If this already produces the final output, order the edges
93 # here to avoid transposes in some cases.
94 tn.contract_between(
95 *nodes,
96 name="con({},{})".format(*nodes),
97 output_edge_order=out_edges)
98 else:
99 tn.contract_between(*nodes, name="con({},{})".format(*nodes))
100 prev_nodes = nodes
101
102 # TODO: More efficient ordering of products based on out_edges
103 res_node = tn.outer_product_final_nodes(out_edges)
104
105 return res_node.tensor
106
107
108 def ncon_network(
109 tensors: Sequence[Tensor],
110 network_structure: Sequence[Sequence],
111 con_order: Optional[Sequence] = None,
112 out_order: Optional[Sequence] = None) -> Tuple[network.TensorNetwork, List[
113 network_components.Edge], List[network_components.Edge]]:
114 r"""Creates a TensorNetwork from a list of tensors according to `network`.
115
116 The network is provided as a list of lists, one for each
117 tensor, specifying labels for the edges connected to that tensor.
118
119 If a contraction order `con_order` and an output order `out_order`
120 are both provided, the edge labels can be anything.
121 Otherwise (`con_order == None or out_order == None`), the edge labels
122 must be integers and edges will be contracted in ascending order.
123 Negative integers denote the (dangling) indices of the output tensor,
124 which will be in descending order, e.g. [-1,-2,-3,...].
125
126 This is used internally by `ncon()`.
127
128 Args:
129 tensors: List of `Tensor`s.
130 network_structure: List of lists specifying the tensor network.
131 con_order: List of edge labels specifying the contraction order.
132 out_order: List of edge labels specifying the output order.
133
134 Returns:
135 net: `TensorNetwork` with the structure given by `network`.
136 con_edges: List of internal `Edge` objects in contraction order.
137 out_edges: List of dangling `Edge` objects in output order.
138 """
139 if len(tensors) != len(network_structure):
140 raise ValueError('len(tensors) != len(network_structure)')
141
142 tn, edges = _build_network(tensors, network_structure)
143
144 if con_order is None:
145 try:
146 con_order = sorted((k for k in edges if k >= 0))
147 if con_order and con_order[0] == 0:
148 raise ValueError("'0' is not a valid edge label when the "
149 "contraction order is not specified separately.")
150 except TypeError:
151 raise ValueError("Non-integer edge label(s): {}".format(
152 list(edges.keys())))
153 else:
154 if len(con_order) != len(set(con_order)):
155 raise ValueError("Duplicate labels in con_order: {}".format(con_order))
156
157 if out_order is None:
158 try:
159 out_order = sorted((k for k in edges if k < 0), reverse=True)
160 except TypeError:
161 raise ValueError("Non-integer edge label(s): {}".format(
162 list(edges.keys())))
163 else:
164 if len(out_order) != len(set(out_order)):
165 raise ValueError("Duplicate labels in out_order: {}".format(out_order))
166
167 try:
168 con_edges = [edges[k] for k in con_order]
169 out_edges = [edges[k] for k in out_order]
170 except KeyError as err:
171 raise ValueError("Order contained an unknown edge label: {}".format(
172 err.args[0]))
173
174 if len(con_edges) + len(out_edges) != len(edges):
175 raise ValueError(
176 "Edges {} were not included in the contraction and output "
177 "ordering.".format(
178 list(set(edges.keys()) - set(con_order) - set(out_order))))
179
180 for e in con_edges:
181 if e.is_dangling():
182 raise ValueError(
183 "Contraction edge {} appears only once in the network.".format(
184 str(e)))
185
186 for e in out_edges:
187 if not e.is_dangling():
188 raise ValueError(
189 "Output edge {} appears more than once in the network.".format(
190 str(e)))
191
192 return tn, con_edges, out_edges
193
194
195 def _build_network(
196 tensors: Sequence[Tensor], network_structure: Sequence[Sequence]
197 ) -> Tuple[network.TensorNetwork, Dict[Any, network_components.Edge]]:
198 tn = network.TensorNetwork()
199 nodes = []
200 edges = {}
201 for i, (tensor, edge_lbls) in enumerate(zip(tensors, network_structure)):
202 if len(tensor.shape) != len(edge_lbls):
203 raise ValueError(
204 "Incorrect number of edge labels specified tensor {}".format(i))
205
206 node = tn.add_node(tensor, name="tensor_{}".format(i))
207 nodes.append(node)
208
209 for (axis_num, edge_lbl) in enumerate(edge_lbls):
210 if edge_lbl not in edges:
211 e = node[axis_num]
212 e.set_name(str(edge_lbl))
213 edges[edge_lbl] = e
214 else:
215 # This will raise an error if the edges are not dangling.
216 e = tn.connect(edges[edge_lbl], node[axis_num], name=str(edge_lbl))
217 edges[edge_lbl] = e
218 return tn, edges
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensornetwork/ncon_interface.py b/tensornetwork/ncon_interface.py
--- a/tensornetwork/ncon_interface.py
+++ b/tensornetwork/ncon_interface.py
@@ -16,6 +16,7 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import warnings
from typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict, Any
from tensornetwork import network
from tensornetwork import network_components
@@ -73,31 +74,46 @@
tn, con_edges, out_edges = ncon_network(
tensors, network_structure, con_order=con_order, out_order=out_order)
- # Contract assuming all edges connecting a given pair of nodes are adjacent
- # in con_order. If this is not the case, the contraction is sub-optimal
- # so we throw an exception.
- prev_nodes = []
+ # Reverse the list so we can pop from the end: O(1).
+ con_edges = con_edges[::-1]
while con_edges:
- e = con_edges.pop(0) # pop so that older nodes can be deallocated
- nodes = e.get_nodes()
-
- nodes_set = set(nodes)
- if nodes_set != set(prev_nodes):
- if not nodes_set.issubset(tn.nodes_set):
- # the node pair was already contracted
- raise ValueError("Edge '{}' is not adjacent to other edges connecting "
- "'{}' and '{}' in the contraction order.".format(
- e, nodes[0], nodes[1]))
- if not con_edges and len(tn.nodes_set) == 2:
- # If this already produces the final output, order the edges
- # here to avoid transposes in some cases.
- tn.contract_between(
- *nodes,
- name="con({},{})".format(*nodes),
- output_edge_order=out_edges)
+ nodes_to_contract = con_edges[-1].get_nodes()
+ edges_to_contract = tn.get_shared_edges(*nodes_to_contract)
+
+ # Eat up all parallel edges that are adjacent in the ordering.
+ adjacent_parallel_edges = set()
+ for edge in reversed(con_edges):
+ if edge in edges_to_contract:
+ adjacent_parallel_edges.add(edge)
else:
- tn.contract_between(*nodes, name="con({},{})".format(*nodes))
- prev_nodes = nodes
+ break
+ con_edges = con_edges[:-len(adjacent_parallel_edges)]
+
+ # In an optimal ordering, all edges connecting a given pair of nodes are
+ # adjacent in con_order. If this is not the case, warn the user.
+ leftovers = edges_to_contract - adjacent_parallel_edges
+ if leftovers:
+ warnings.warn(
+ "Suboptimal ordering detected. Edges {} are not adjacent in the "
+ "contraction order to edges {}, connecting nodes {}. Deviating from "
+ "the specified ordering!".format(
+ list(map(str, leftovers)),
+ list(map(str, adjacent_parallel_edges)),
+ list(map(str, nodes_to_contract)))
+ )
+ con_edges = [e for e in con_edges if e not in edges_to_contract]
+
+ if set(nodes_to_contract) == tn.nodes_set:
+ # If this already produces the final output, order the edges
+ # here to avoid transposes in some cases.
+ tn.contract_between(
+ *nodes_to_contract,
+ name="con({},{})".format(*nodes_to_contract),
+ output_edge_order=out_edges)
+ else:
+ tn.contract_between(
+ *nodes_to_contract,
+ name="con({},{})".format(*nodes_to_contract))
# TODO: More efficient ordering of products based on out_edges
res_node = tn.outer_product_final_nodes(out_edges)
| {"golden_diff": "diff --git a/tensornetwork/ncon_interface.py b/tensornetwork/ncon_interface.py\n--- a/tensornetwork/ncon_interface.py\n+++ b/tensornetwork/ncon_interface.py\n@@ -16,6 +16,7 @@\n from __future__ import absolute_import\n from __future__ import division\n from __future__ import print_function\n+import warnings\n from typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict, Any\n from tensornetwork import network\n from tensornetwork import network_components\n@@ -73,31 +74,46 @@\n tn, con_edges, out_edges = ncon_network(\n tensors, network_structure, con_order=con_order, out_order=out_order)\n \n- # Contract assuming all edges connecting a given pair of nodes are adjacent\n- # in con_order. If this is not the case, the contraction is sub-optimal\n- # so we throw an exception.\n- prev_nodes = []\n+ # Reverse the list so we can pop from the end: O(1).\n+ con_edges = con_edges[::-1]\n while con_edges:\n- e = con_edges.pop(0) # pop so that older nodes can be deallocated\n- nodes = e.get_nodes()\n-\n- nodes_set = set(nodes)\n- if nodes_set != set(prev_nodes):\n- if not nodes_set.issubset(tn.nodes_set):\n- # the node pair was already contracted\n- raise ValueError(\"Edge '{}' is not adjacent to other edges connecting \"\n- \"'{}' and '{}' in the contraction order.\".format(\n- e, nodes[0], nodes[1]))\n- if not con_edges and len(tn.nodes_set) == 2:\n- # If this already produces the final output, order the edges \n- # here to avoid transposes in some cases.\n- tn.contract_between(\n- *nodes,\n- name=\"con({},{})\".format(*nodes),\n- output_edge_order=out_edges)\n+ nodes_to_contract = con_edges[-1].get_nodes()\n+ edges_to_contract = tn.get_shared_edges(*nodes_to_contract)\n+\n+ # Eat up all parallel edges that are adjacent in the ordering.\n+ adjacent_parallel_edges = set()\n+ for edge in reversed(con_edges):\n+ if edge in edges_to_contract:\n+ adjacent_parallel_edges.add(edge)\n else:\n- tn.contract_between(*nodes, name=\"con({},{})\".format(*nodes))\n- prev_nodes = nodes\n+ break\n+ con_edges = con_edges[:-len(adjacent_parallel_edges)]\n+\n+ # In an optimal ordering, all edges connecting a given pair of nodes are\n+ # adjacent in con_order. If this is not the case, warn the user.\n+ leftovers = edges_to_contract - adjacent_parallel_edges\n+ if leftovers:\n+ warnings.warn(\n+ \"Suboptimal ordering detected. Edges {} are not adjacent in the \"\n+ \"contraction order to edges {}, connecting nodes {}. 
Deviating from \"\n+ \"the specified ordering!\".format(\n+ list(map(str, leftovers)),\n+ list(map(str, adjacent_parallel_edges)),\n+ list(map(str, nodes_to_contract)))\n+ )\n+ con_edges = [e for e in con_edges if e not in edges_to_contract]\n+\n+ if set(nodes_to_contract) == tn.nodes_set:\n+ # If this already produces the final output, order the edges\n+ # here to avoid transposes in some cases.\n+ tn.contract_between(\n+ *nodes_to_contract,\n+ name=\"con({},{})\".format(*nodes_to_contract),\n+ output_edge_order=out_edges)\n+ else:\n+ tn.contract_between(\n+ *nodes_to_contract,\n+ name=\"con({},{})\".format(*nodes_to_contract))\n \n # TODO: More efficient ordering of products based on out_edges\n res_node = tn.outer_product_final_nodes(out_edges)\n", "issue": "ncon throws an obscure error for a valid contraction order\nHey, I just noticed that the following code throws an obscure error:\r\n```python \r\nd = 2\r\nD1 = 4\r\nD2= 5\r\na = tf.random_uniform(shape = [D1,D2,d,d])\r\nb = tf.random_uniform(shape = [D1,d,D1])\r\nc = tf.random_uniform(shape = [D2,d,D2])\r\nf = tf.random_uniform(shape = [d,d,d,d])\r\n\r\ntn.ncon([a, b, c, f], [[1,3,5,4], [1,2,-1], [3,4,-2], [5,-4,2,-3]])\r\n```\r\nThe order above is not chosen smartly, but `ncon` should either throw an interpretable error, or do the contraction\n", "before_files": [{"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"NCON interface to TensorNetwork.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict, Any\nfrom tensornetwork import network\nfrom tensornetwork import network_components\n\nTensor = Any\n\n\ndef ncon(tensors: Sequence[Tensor],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None) -> Tensor:\n r\"\"\"Contracts a list of tensors according to a tensor network specification.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be nonzero integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. 
[-1,-2,-3,...].\n\n For example, matrix multiplication:\n\n ```python\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n B = np.array([[1.0, 1.0], [0.0, 1.0]])\n ncon([A,B], [(-1, 1), (1, -2)])\n ```\n\n Matrix trace:\n\n ```python\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n ncon([A], [(1, 1)]) # 5.0\n ```\n\n Note: The reason `0` is not allowed as an edge label without manually\n specifying the contraction order is to maintain compatibility with the\n [original NCON implementation](https://arxiv.org/abs/1402.0939). However,\n the use of `0` in `con_order` to denote outer products is not (currently) \n supported in this implementation.\n\n Args:\n tensors: List of `Tensor`s.\n network_structure: List of lists specifying the tensor network\n structure.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n\n Returns:\n A `Tensor` resulting from the contraction of the tensor network.\n \"\"\"\n tn, con_edges, out_edges = ncon_network(\n tensors, network_structure, con_order=con_order, out_order=out_order)\n\n # Contract assuming all edges connecting a given pair of nodes are adjacent\n # in con_order. If this is not the case, the contraction is sub-optimal\n # so we throw an exception.\n prev_nodes = []\n while con_edges:\n e = con_edges.pop(0) # pop so that older nodes can be deallocated\n nodes = e.get_nodes()\n\n nodes_set = set(nodes)\n if nodes_set != set(prev_nodes):\n if not nodes_set.issubset(tn.nodes_set):\n # the node pair was already contracted\n raise ValueError(\"Edge '{}' is not adjacent to other edges connecting \"\n \"'{}' and '{}' in the contraction order.\".format(\n e, nodes[0], nodes[1]))\n if not con_edges and len(tn.nodes_set) == 2:\n # If this already produces the final output, order the edges \n # here to avoid transposes in some cases.\n tn.contract_between(\n *nodes,\n name=\"con({},{})\".format(*nodes),\n output_edge_order=out_edges)\n else:\n tn.contract_between(*nodes, name=\"con({},{})\".format(*nodes))\n prev_nodes = nodes\n\n # TODO: More efficient ordering of products based on out_edges\n res_node = tn.outer_product_final_nodes(out_edges)\n\n return res_node.tensor\n\n\ndef ncon_network(\n tensors: Sequence[Tensor],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None) -> Tuple[network.TensorNetwork, List[\n network_components.Edge], List[network_components.Edge]]:\n r\"\"\"Creates a TensorNetwork from a list of tensors according to `network`.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. 
[-1,-2,-3,...].\n\n This is used internally by `ncon()`.\n\n Args:\n tensors: List of `Tensor`s.\n network_structure: List of lists specifying the tensor network.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n\n Returns:\n net: `TensorNetwork` with the structure given by `network`.\n con_edges: List of internal `Edge` objects in contraction order.\n out_edges: List of dangling `Edge` objects in output order.\n \"\"\"\n if len(tensors) != len(network_structure):\n raise ValueError('len(tensors) != len(network_structure)')\n\n tn, edges = _build_network(tensors, network_structure)\n\n if con_order is None:\n try:\n con_order = sorted((k for k in edges if k >= 0))\n if con_order and con_order[0] == 0:\n raise ValueError(\"'0' is not a valid edge label when the \"\n \"contraction order is not specified separately.\")\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(con_order) != len(set(con_order)):\n raise ValueError(\"Duplicate labels in con_order: {}\".format(con_order))\n\n if out_order is None:\n try:\n out_order = sorted((k for k in edges if k < 0), reverse=True)\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(out_order) != len(set(out_order)):\n raise ValueError(\"Duplicate labels in out_order: {}\".format(out_order))\n\n try:\n con_edges = [edges[k] for k in con_order]\n out_edges = [edges[k] for k in out_order]\n except KeyError as err:\n raise ValueError(\"Order contained an unknown edge label: {}\".format(\n err.args[0]))\n\n if len(con_edges) + len(out_edges) != len(edges):\n raise ValueError(\n \"Edges {} were not included in the contraction and output \"\n \"ordering.\".format(\n list(set(edges.keys()) - set(con_order) - set(out_order))))\n\n for e in con_edges:\n if e.is_dangling():\n raise ValueError(\n \"Contraction edge {} appears only once in the network.\".format(\n str(e)))\n\n for e in out_edges:\n if not e.is_dangling():\n raise ValueError(\n \"Output edge {} appears more than once in the network.\".format(\n str(e)))\n\n return tn, con_edges, out_edges\n\n\ndef _build_network(\n tensors: Sequence[Tensor], network_structure: Sequence[Sequence]\n) -> Tuple[network.TensorNetwork, Dict[Any, network_components.Edge]]:\n tn = network.TensorNetwork()\n nodes = []\n edges = {}\n for i, (tensor, edge_lbls) in enumerate(zip(tensors, network_structure)):\n if len(tensor.shape) != len(edge_lbls):\n raise ValueError(\n \"Incorrect number of edge labels specified tensor {}\".format(i))\n\n node = tn.add_node(tensor, name=\"tensor_{}\".format(i))\n nodes.append(node)\n\n for (axis_num, edge_lbl) in enumerate(edge_lbls):\n if edge_lbl not in edges:\n e = node[axis_num]\n e.set_name(str(edge_lbl))\n edges[edge_lbl] = e\n else:\n # This will raise an error if the edges are not dangling.\n e = tn.connect(edges[edge_lbl], node[axis_num], name=str(edge_lbl))\n edges[edge_lbl] = e\n return tn, edges\n", "path": "tensornetwork/ncon_interface.py"}], "after_files": [{"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed 
on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"NCON interface to TensorNetwork.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport warnings\nfrom typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict, Any\nfrom tensornetwork import network\nfrom tensornetwork import network_components\n\nTensor = Any\n\n\ndef ncon(tensors: Sequence[Tensor],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None) -> Tensor:\n r\"\"\"Contracts a list of tensors according to a tensor network specification.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be nonzero integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. [-1,-2,-3,...].\n\n For example, matrix multiplication:\n\n ```python\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n B = np.array([[1.0, 1.0], [0.0, 1.0]])\n ncon([A,B], [(-1, 1), (1, -2)])\n ```\n\n Matrix trace:\n\n ```python\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n ncon([A], [(1, 1)]) # 5.0\n ```\n\n Note: The reason `0` is not allowed as an edge label without manually\n specifying the contraction order is to maintain compatibility with the\n [original NCON implementation](https://arxiv.org/abs/1402.0939). However,\n the use of `0` in `con_order` to denote outer products is not (currently) \n supported in this implementation.\n\n Args:\n tensors: List of `Tensor`s.\n network_structure: List of lists specifying the tensor network\n structure.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n\n Returns:\n A `Tensor` resulting from the contraction of the tensor network.\n \"\"\"\n tn, con_edges, out_edges = ncon_network(\n tensors, network_structure, con_order=con_order, out_order=out_order)\n\n # Reverse the list so we can pop from the end: O(1).\n con_edges = con_edges[::-1]\n while con_edges:\n nodes_to_contract = con_edges[-1].get_nodes()\n edges_to_contract = tn.get_shared_edges(*nodes_to_contract)\n\n # Eat up all parallel edges that are adjacent in the ordering.\n adjacent_parallel_edges = set()\n for edge in reversed(con_edges):\n if edge in edges_to_contract:\n adjacent_parallel_edges.add(edge)\n else:\n break\n con_edges = con_edges[:-len(adjacent_parallel_edges)]\n\n # In an optimal ordering, all edges connecting a given pair of nodes are\n # adjacent in con_order. If this is not the case, warn the user.\n leftovers = edges_to_contract - adjacent_parallel_edges\n if leftovers:\n warnings.warn(\n \"Suboptimal ordering detected. Edges {} are not adjacent in the \"\n \"contraction order to edges {}, connecting nodes {}. 
Deviating from \"\n \"the specified ordering!\".format(\n list(map(str, leftovers)),\n list(map(str, adjacent_parallel_edges)),\n list(map(str, nodes_to_contract)))\n )\n con_edges = [e for e in con_edges if e not in edges_to_contract]\n\n if set(nodes_to_contract) == tn.nodes_set:\n # If this already produces the final output, order the edges\n # here to avoid transposes in some cases.\n tn.contract_between(\n *nodes_to_contract,\n name=\"con({},{})\".format(*nodes_to_contract),\n output_edge_order=out_edges)\n else:\n tn.contract_between(\n *nodes_to_contract,\n name=\"con({},{})\".format(*nodes_to_contract))\n\n # TODO: More efficient ordering of products based on out_edges\n res_node = tn.outer_product_final_nodes(out_edges)\n\n return res_node.tensor\n\n\ndef ncon_network(\n tensors: Sequence[Tensor],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None) -> Tuple[network.TensorNetwork, List[\n network_components.Edge], List[network_components.Edge]]:\n r\"\"\"Creates a TensorNetwork from a list of tensors according to `network`.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. [-1,-2,-3,...].\n\n This is used internally by `ncon()`.\n\n Args:\n tensors: List of `Tensor`s.\n network_structure: List of lists specifying the tensor network.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n\n Returns:\n net: `TensorNetwork` with the structure given by `network`.\n con_edges: List of internal `Edge` objects in contraction order.\n out_edges: List of dangling `Edge` objects in output order.\n \"\"\"\n if len(tensors) != len(network_structure):\n raise ValueError('len(tensors) != len(network_structure)')\n\n tn, edges = _build_network(tensors, network_structure)\n\n if con_order is None:\n try:\n con_order = sorted((k for k in edges if k >= 0))\n if con_order and con_order[0] == 0:\n raise ValueError(\"'0' is not a valid edge label when the \"\n \"contraction order is not specified separately.\")\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(con_order) != len(set(con_order)):\n raise ValueError(\"Duplicate labels in con_order: {}\".format(con_order))\n\n if out_order is None:\n try:\n out_order = sorted((k for k in edges if k < 0), reverse=True)\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(out_order) != len(set(out_order)):\n raise ValueError(\"Duplicate labels in out_order: {}\".format(out_order))\n\n try:\n con_edges = [edges[k] for k in con_order]\n out_edges = [edges[k] for k in out_order]\n except KeyError as err:\n raise ValueError(\"Order contained an unknown edge label: {}\".format(\n err.args[0]))\n\n if len(con_edges) + len(out_edges) != len(edges):\n raise ValueError(\n \"Edges {} were not included in the contraction and output \"\n \"ordering.\".format(\n list(set(edges.keys()) - set(con_order) - set(out_order))))\n\n for e in 
con_edges:\n if e.is_dangling():\n raise ValueError(\n \"Contraction edge {} appears only once in the network.\".format(\n str(e)))\n\n for e in out_edges:\n if not e.is_dangling():\n raise ValueError(\n \"Output edge {} appears more than once in the network.\".format(\n str(e)))\n\n return tn, con_edges, out_edges\n\n\ndef _build_network(\n tensors: Sequence[Tensor], network_structure: Sequence[Sequence]\n) -> Tuple[network.TensorNetwork, Dict[Any, network_components.Edge]]:\n tn = network.TensorNetwork()\n nodes = []\n edges = {}\n for i, (tensor, edge_lbls) in enumerate(zip(tensors, network_structure)):\n if len(tensor.shape) != len(edge_lbls):\n raise ValueError(\n \"Incorrect number of edge labels specified tensor {}\".format(i))\n\n node = tn.add_node(tensor, name=\"tensor_{}\".format(i))\n nodes.append(node)\n\n for (axis_num, edge_lbl) in enumerate(edge_lbls):\n if edge_lbl not in edges:\n e = node[axis_num]\n e.set_name(str(edge_lbl))\n edges[edge_lbl] = e\n else:\n # This will raise an error if the edges are not dangling.\n e = tn.connect(edges[edge_lbl], node[axis_num], name=str(edge_lbl))\n edges[edge_lbl] = e\n return tn, edges\n", "path": "tensornetwork/ncon_interface.py"}]} | 2,981 | 856 |
gh_patches_debug_822 | rasdani/github-patches | git_diff | opendatacube__datacube-core-348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unnecessary dependency on `pathlib` when running in python3
### Expected behaviour
Datacube shouldn't depend on unnecessary packages when running in Python 3.
### Actual behaviour
There's a dependency on `pathlib`, which is included in the Python 3 standard library, and so doesn't need to be installed.
This causes trouble on the NCI deployment when trying to load `stats` modules, which use the `setuptools` entry_points for their registration, and it returns error messages to users trying to load them.
### Steps to reproduce the behaviour
```
module load agdc-py3-prod agdc_statistics
dra547@raijin4:~ $ python
Python 3.6.3 | packaged by conda-forge | (default, Nov 4 2017, 10:10:56)
[GCC 4.8.2 20140120 (Red Hat 4.8.2-15)] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import datacube_stats.statistics
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/g/data/v10/public/modules/agdc_statistics/0.9a7/lib/python3.6/site-packages/datacube_stats/statistics.py", line 769, in <module>
STATS[entry_point.name] = entry_point.load()
File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2404, in load
self.require(*args, **kwargs)
File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2427, in require
items = working_set.resolve(reqs, env, installer, extras=self.extras)
File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 870, in resolve
raise DistributionNotFound(req, requirers)
pkg_resources.DistributionNotFound: The 'pathlib' distribution was not found and is required by datacube
>>>
```
### The Fix
Modify `setup.py` to use [platform specific dependencies](https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies) to only require `pathlib` when not running on python 3.
--- END ISSUE ---
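The fix suggested above maps onto a PEP 508 environment marker attached to the requirement itself. A small sketch (illustrative only; the project name and surrounding arguments are placeholders, not datacube's real metadata):

```python
from setuptools import setup

setup(
    name="example-project",  # placeholder, for illustration only
    install_requires=[
        # ...
        'pathlib;python_version<"3"',  # the backport is only installed on Python 2
        # ...
    ],
)
```

With the marker in place, the installed metadata no longer lists `pathlib` as an unconditional requirement, so the `DistributionNotFound` error in the traceback above should no longer be triggered by it on Python 3.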
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import versioneer
4 from setuptools import setup, find_packages
5
6 tests_require = [
7 'pytest', 'pytest-cov', 'mock', 'pep8', 'pylint', 'hypothesis', 'compliance-checker', 'objgraph'
8 ]
9
10 extras_require = {
11 'performance': ['ciso8601', 'bottleneck'],
12 'interactive': ['matplotlib', 'fiona'],
13 'distributed': ['distributed', 'dask[distributed]'],
14 'analytics': ['scipy', 'pyparsing', 'numexpr'],
15 'doc': ['Sphinx', 'setuptools'],
16 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],
17 'celery': ['celery>=4', 'redis'],
18 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],
19 'test': tests_require,
20 }
21 # An 'all' option, following ipython naming conventions.
22 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
23
24 setup(
25 name='datacube',
26 version=versioneer.get_version(),
27 cmdclass=versioneer.get_cmdclass(),
28
29 url='https://github.com/opendatacube/datacube-core',
30 author='AGDC Collaboration',
31 maintainer='AGDC Collaboration',
32 maintainer_email='',
33 description='An analysis environment for satellite and other earth observation data',
34 long_description=open('README.rst').read(),
35 license='Apache License 2.0',
36 classifiers=[
37 "Development Status :: 4 - Beta",
38 "Intended Audience :: Developers",
39 "Intended Audience :: Science/Research",
40 "License :: OSI Approved :: Apache Software License",
41 "Natural Language :: English",
42 "Operating System :: MacOS :: MacOS X",
43 "Operating System :: POSIX",
44 "Operating System :: POSIX :: BSD",
45 "Operating System :: POSIX :: Linux",
46 "Operating System :: Microsoft :: Windows",
47 "Programming Language :: Python",
48 "Programming Language :: Python :: 2",
49 "Programming Language :: Python :: 2.7",
50 "Programming Language :: Python :: 3",
51 "Programming Language :: Python :: 3.5",
52 "Topic :: Scientific/Engineering :: GIS",
53 "Topic :: Scientific/Engineering :: Information Analysis",
54 ],
55
56 packages=find_packages(
57 exclude=('tests', 'tests.*',
58 'integration_tests', 'integration_tests.*')
59 ),
60 package_data={
61 '': ['*.yaml', '*/*.yaml'],
62 },
63 scripts=[
64 'datacube_apps/scripts/pbs_helpers.sh'
65 ],
66 setup_requires=[
67 'pytest-runner'
68 ],
69 install_requires=[
70 'affine',
71 'cachetools',
72 'click>=5.0',
73 'cloudpickle>=0.4',
74 'dask[array]',
75 'gdal>=1.9',
76 'jsonschema',
77 'netcdf4',
78 'numpy',
79 'pathlib',
80 'psycopg2',
81 'pypeg2',
82 'python-dateutil',
83 'pyyaml',
84 'rasterio>=0.9', # required for zip reading, 0.9 gets around 1.0a ordering problems
85 'singledispatch',
86 'sqlalchemy',
87 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost
88 ],
89 extras_require=extras_require,
90 tests_require=tests_require,
91
92 entry_points={
93 'console_scripts': [
94 'datacube-search = datacube.scripts.search_tool:cli',
95 'datacube = datacube.scripts.cli_app:cli',
96 'datacube-stacker = datacube_apps.stacker:main',
97 'datacube-worker = datacube.execution.worker:main',
98 'datacube-fixer = datacube_apps.stacker:fixer_main',
99 'datacube-ncml = datacube_apps.ncml:ncml_app',
100 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',
101 'movie_generator = datacube_apps.movie_generator:main',
102 'datacube-simple-replica = datacube_apps.simple_replica:replicate'
103 ]
104 },
105 )
106
```
--- END FILES ---
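The marker syntax can also be checked on its own before touching `setup.py`; an optional sanity check using the `packaging` library (not a datacube dependency, just a convenient way to evaluate PEP 508 markers):

```python
from packaging.markers import Marker

marker = Marker('python_version < "3"')
print(marker.evaluate())                           # False on any Python 3 interpreter
print(marker.evaluate({"python_version": "2.7"}))  # True: the backport would be kept
```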
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -76,7 +76,7 @@
'jsonschema',
'netcdf4',
'numpy',
- 'pathlib',
+ 'pathlib;python_version<"3"',
'psycopg2',
'pypeg2',
'python-dateutil',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -76,7 +76,7 @@\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n- 'pathlib',\n+ 'pathlib;python_version<\"3\"',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n", "issue": "Unnecessary dependency on `pathlib` when running in python3\n### Expected behaviour\r\nDatacube shouldn't depend on unnecessary packages when running in Python 3.\r\n\r\n### Actual behaviour\r\nThere's a dependency on `pathlib`, which is included in the Python 3 standard library, and so doesn't need to be installed.\r\n\r\nThis causes trouble on the NCI deployment when trying to load `stats` modules which use the `setuptools` entry_points for their registration. And returns error messages to users trying to load them.\r\n\r\n### Steps to reproduce the behaviour\r\n```\r\nmodule load agdc-py3-prod agdc_statistics\r\ndra547@raijin4:~ $ python\r\nPython 3.6.3 | packaged by conda-forge | (default, Nov 4 2017, 10:10:56)\r\n[GCC 4.8.2 20140120 (Red Hat 4.8.2-15)] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import datacube_stats.statistics\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/g/data/v10/public/modules/agdc_statistics/0.9a7/lib/python3.6/site-packages/datacube_stats/statistics.py\", line 769, in <module>\r\n STATS[entry_point.name] = entry_point.load()\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py\", line 2404, in load\r\n self.require(*args, **kwargs)\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py\", line 2427, in require\r\n items = working_set.resolve(reqs, env, installer, extras=self.extras)\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py\", line 870, in resolve\r\n raise DistributionNotFound(req, requirers)\r\npkg_resources.DistributionNotFound: The 'pathlib' distribution was not found and is required by datacube\r\n>>>\r\n```\r\n\r\n### The Fix\r\nModify `setup.py` to use [platform specific dependencies](https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies) to only require `pathlib` when not running on python 3.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest', 'pytest-cov', 'mock', 'pep8', 'pylint', 'hypothesis', 'compliance-checker', 'objgraph'\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'analytics': ['scipy', 'pyparsing', 'numexpr'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n\n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n maintainer='AGDC Collaboration',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n 
long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n setup_requires=[\n 'pytest-runner'\n ],\n install_requires=[\n 'affine',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'gdal>=1.9',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'pathlib',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=0.9', # required for zip reading, 0.9 gets around 1.0a ordering problems\n 'singledispatch',\n 'sqlalchemy',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate'\n ]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest', 'pytest-cov', 'mock', 'pep8', 'pylint', 'hypothesis', 'compliance-checker', 'objgraph'\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'analytics': ['scipy', 'pyparsing', 'numexpr'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n\n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n maintainer='AGDC Collaboration',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI 
Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n setup_requires=[\n 'pytest-runner'\n ],\n install_requires=[\n 'affine',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'gdal>=1.9',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'pathlib;python_version<\"3\"',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=0.9', # required for zip reading, 0.9 gets around 1.0a ordering problems\n 'singledispatch',\n 'sqlalchemy',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate'\n ]\n },\n)\n", "path": "setup.py"}]} | 1,961 | 83 |
gh_patches_debug_47844 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1448 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
POSTGRES_PORT isn't respected during server setup
**Describe the bug**
`POSTGRES_PORT` isn't respected - `docker-compose up --build` starts the postgres instance on port 5432 regardless.
**To Reproduce**
Steps to reproduce the behavior:
1. Follow the [documentation for setting up a new instance](https://docs.joinbookwyrm.com/installing-in-production.html)
2. In `.env`, set `POSTGRES_PORT` to `5433` (and make the corresponding changes in `docker-compose.yml`)
3. Run `docker-compose up --build`
**Expected behavior**
The database is started on port 5433
**Actual behavior**
The database is started on port 5432
**Additional context**
I also tried setting the fallback to `5433` in `bookwyrm/settings.py` in case something was going wrong with the env propagation, but that didn't help either.
I was finally able to work around it by additionally setting `PGPORT` in `.env`
---
**Server:**
- OS: Raspberry Pi OS (raspbian) 10.4
- docker 20.10.8
- docker-compose 1.29.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/settings.py`
Content:
```
1 """ bookwyrm settings and configuration """
2 import os
3 from environs import Env
4
5 import requests
6 from django.utils.translation import gettext_lazy as _
7
8
9 env = Env()
10 DOMAIN = env("DOMAIN")
11 VERSION = "0.0.1"
12
13 PAGE_LENGTH = env("PAGE_LENGTH", 15)
14 DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English")
15
16 JS_CACHE = "7f2343cf"
17
18 # email
19 EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
20 EMAIL_HOST = env("EMAIL_HOST")
21 EMAIL_PORT = env("EMAIL_PORT", 587)
22 EMAIL_HOST_USER = env("EMAIL_HOST_USER")
23 EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
24 EMAIL_USE_TLS = env.bool("EMAIL_USE_TLS", True)
25 EMAIL_USE_SSL = env.bool("EMAIL_USE_SSL", False)
26 DEFAULT_FROM_EMAIL = f"admin@{DOMAIN}"
27
28 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
29 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
30 LOCALE_PATHS = [
31 os.path.join(BASE_DIR, "locale"),
32 ]
33
34 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
35
36 # Preview image
37 ENABLE_PREVIEW_IMAGES = env.bool("ENABLE_PREVIEW_IMAGES", False)
38 PREVIEW_BG_COLOR = env.str("PREVIEW_BG_COLOR", "use_dominant_color_light")
39 PREVIEW_TEXT_COLOR = env.str("PREVIEW_TEXT_COLOR", "#363636")
40 PREVIEW_IMG_WIDTH = env.int("PREVIEW_IMG_WIDTH", 1200)
41 PREVIEW_IMG_HEIGHT = env.int("PREVIEW_IMG_HEIGHT", 630)
42 PREVIEW_DEFAULT_COVER_COLOR = env.str("PREVIEW_DEFAULT_COVER_COLOR", "#002549")
43
44 # Quick-start development settings - unsuitable for production
45 # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
46
47 # SECURITY WARNING: keep the secret key used in production secret!
48 SECRET_KEY = env("SECRET_KEY")
49
50 # SECURITY WARNING: don't run with debug turned on in production!
51 DEBUG = env.bool("DEBUG", True)
52 USE_HTTPS = env.bool("USE_HTTPS", False)
53
54 ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", ["*"])
55
56 # Application definition
57
58 INSTALLED_APPS = [
59 "django.contrib.admin",
60 "django.contrib.auth",
61 "django.contrib.contenttypes",
62 "django.contrib.sessions",
63 "django.contrib.messages",
64 "django.contrib.staticfiles",
65 "django.contrib.humanize",
66 "django_rename_app",
67 "bookwyrm",
68 "celery",
69 "imagekit",
70 "storages",
71 ]
72
73 MIDDLEWARE = [
74 "django.middleware.security.SecurityMiddleware",
75 "django.contrib.sessions.middleware.SessionMiddleware",
76 "django.middleware.locale.LocaleMiddleware",
77 "django.middleware.common.CommonMiddleware",
78 "django.middleware.csrf.CsrfViewMiddleware",
79 "django.contrib.auth.middleware.AuthenticationMiddleware",
80 "bookwyrm.middleware.TimezoneMiddleware",
81 "bookwyrm.middleware.IPBlocklistMiddleware",
82 "django.contrib.messages.middleware.MessageMiddleware",
83 "django.middleware.clickjacking.XFrameOptionsMiddleware",
84 ]
85
86 ROOT_URLCONF = "bookwyrm.urls"
87
88 TEMPLATES = [
89 {
90 "BACKEND": "django.template.backends.django.DjangoTemplates",
91 "DIRS": ["templates"],
92 "APP_DIRS": True,
93 "OPTIONS": {
94 "context_processors": [
95 "django.template.context_processors.debug",
96 "django.template.context_processors.request",
97 "django.contrib.auth.context_processors.auth",
98 "django.contrib.messages.context_processors.messages",
99 "bookwyrm.context_processors.site_settings",
100 ],
101 },
102 },
103 ]
104
105
106 WSGI_APPLICATION = "bookwyrm.wsgi.application"
107
108 # redis/activity streams settings
109 REDIS_ACTIVITY_HOST = env("REDIS_ACTIVITY_HOST", "localhost")
110 REDIS_ACTIVITY_PORT = env("REDIS_ACTIVITY_PORT", 6379)
111 REDIS_ACTIVITY_PASSWORD = env("REDIS_ACTIVITY_PASSWORD", None)
112
113 MAX_STREAM_LENGTH = int(env("MAX_STREAM_LENGTH", 200))
114
115 STREAMS = [
116 {"key": "home", "name": _("Home Timeline"), "shortname": _("Home")},
117 {"key": "books", "name": _("Books Timeline"), "shortname": _("Books")},
118 ]
119
120 # Database
121 # https://docs.djangoproject.com/en/3.2/ref/settings/#databases
122
123 DATABASES = {
124 "default": {
125 "ENGINE": "django.db.backends.postgresql_psycopg2",
126 "NAME": env("POSTGRES_DB", "fedireads"),
127 "USER": env("POSTGRES_USER", "fedireads"),
128 "PASSWORD": env("POSTGRES_PASSWORD", "fedireads"),
129 "HOST": env("POSTGRES_HOST", ""),
130 "PORT": env("POSTGRES_PORT", 5432),
131 },
132 }
133
134
135 LOGIN_URL = "/login/"
136 AUTH_USER_MODEL = "bookwyrm.User"
137
138 # Password validation
139 # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
140
141 # pylint: disable=line-too-long
142 AUTH_PASSWORD_VALIDATORS = [
143 {
144 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
145 },
146 {
147 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
148 },
149 {
150 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
151 },
152 {
153 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
154 },
155 ]
156
157
158 # Internationalization
159 # https://docs.djangoproject.com/en/3.2/topics/i18n/
160
161 LANGUAGE_CODE = "en-us"
162 LANGUAGES = [
163 ("en-us", _("English")),
164 ("de-de", _("German")),
165 ("es", _("Spanish")),
166 ("fr-fr", _("French")),
167 ("zh-hans", _("Simplified Chinese")),
168 ("zh-hant", _("Traditional Chinese")),
169 ]
170
171
172 TIME_ZONE = "UTC"
173
174 USE_I18N = True
175
176 USE_L10N = True
177
178 USE_TZ = True
179
180
181 agent = requests.utils.default_user_agent()
182 USER_AGENT = f"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)"
183
184 # Imagekit generated thumbnails
185 ENABLE_THUMBNAIL_GENERATION = env.bool("ENABLE_THUMBNAIL_GENERATION", False)
186 IMAGEKIT_CACHEFILE_DIR = "thumbnails"
187
188 # Static files (CSS, JavaScript, Images)
189 # https://docs.djangoproject.com/en/3.2/howto/static-files/
190
191 PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
192
193 # Storage
194
195 PROTOCOL = "http"
196 if USE_HTTPS:
197 PROTOCOL = "https"
198
199 USE_S3 = env.bool("USE_S3", False)
200
201 if USE_S3:
202 # AWS settings
203 AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID")
204 AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY")
205 AWS_STORAGE_BUCKET_NAME = env("AWS_STORAGE_BUCKET_NAME")
206 AWS_S3_CUSTOM_DOMAIN = env("AWS_S3_CUSTOM_DOMAIN")
207 AWS_S3_REGION_NAME = env("AWS_S3_REGION_NAME", "")
208 AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL")
209 AWS_DEFAULT_ACL = "public-read"
210 AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
211 # S3 Static settings
212 STATIC_LOCATION = "static"
213 STATIC_URL = f"https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/"
214 STATICFILES_STORAGE = "bookwyrm.storage_backends.StaticStorage"
215 # S3 Media settings
216 MEDIA_LOCATION = "images"
217 MEDIA_URL = f"https://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/"
218 MEDIA_FULL_URL = MEDIA_URL
219 DEFAULT_FILE_STORAGE = "bookwyrm.storage_backends.ImagesStorage"
220 # I don't know if it's used, but the site crashes without it
221 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
222 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
223 else:
224 STATIC_URL = "/static/"
225 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
226 MEDIA_URL = "/images/"
227 MEDIA_FULL_URL = f"{PROTOCOL}://{DOMAIN}{MEDIA_URL}"
228 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py
--- a/bookwyrm/settings.py
+++ b/bookwyrm/settings.py
@@ -127,7 +127,7 @@
"USER": env("POSTGRES_USER", "fedireads"),
"PASSWORD": env("POSTGRES_PASSWORD", "fedireads"),
"HOST": env("POSTGRES_HOST", ""),
- "PORT": env("POSTGRES_PORT", 5432),
+ "PORT": env("PGPORT", 5432),
},
}
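The switch to `PGPORT` works because it is the standard PostgreSQL environment variable: the `postgres` server process and libpq-based clients both read it, so a single value in `.env` keeps the database container and Django's connection settings in agreement (which is also why the reporter's manual `PGPORT` workaround already helped). Below is a minimal sketch of the same lookup using only the standard library; the project itself goes through `environs`, so the variable name is the only piece that matters here.

```python
import os

# PGPORT is honoured by the postgres server, psql and other libpq clients,
# so reading the same variable here keeps Django pointed at whatever port
# the database container is actually listening on.
DATABASE_PORT = int(os.environ.get("PGPORT", "5432"))

print(f"connecting to PostgreSQL on port {DATABASE_PORT}")
```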
| {"golden_diff": "diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py\n--- a/bookwyrm/settings.py\n+++ b/bookwyrm/settings.py\n@@ -127,7 +127,7 @@\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n- \"PORT\": env(\"POSTGRES_PORT\", 5432),\n+ \"PORT\": env(\"PGPORT\", 5432),\n },\n }\n", "issue": "POSTGRES_PORT isn't respected during server setup\n**Describe the bug**\r\n`POSTGRES_PORT` isn't respected - `docker-compose up --build` starts the postgres instance on port 5432 regardless.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Follow the [documentation for setting up a new instance](https://docs.joinbookwyrm.com/installing-in-production.html)\r\n2. In `.env`, set `POSTGRES_PORT` to `5433` (and make the corresponding changes in `docker-compose.yml`)\r\n3. Run `docker-compose up --build`\r\n\r\n**Expected behavior**\r\nThe database is started on port 5433\r\n\r\n**Actual behavior**\r\nThe database is started on port 5432\r\n\r\n**Additional context**\r\nI also tried setting the fallback to `5433` in `bookwyrm/settings.py` in case something was going wrong with the env propagation, but that didn't help either.\r\nI was finally able to workaround by additionally setting `PGPORT` in `.env`\r\n\r\n---\r\n\r\n**Server:**\r\n - OS: Raspberry Pi OS (raspbian) 10.4\r\n - docker 20.10.8\r\n - docker-compose 1.29.2\r\n\n", "before_files": [{"content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.0.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"7f2343cf\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nDEFAULT_FROM_EMAIL = f\"admin@{DOMAIN}\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n 
\"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.middleware.TimezoneMiddleware\",\n \"bookwyrm.middleware.IPBlocklistMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"fedireads\"),\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"POSTGRES_PORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"German\")),\n (\"es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n (\"zh-hans\", _(\"Simplified Chinese\")),\n (\"zh-hant\", _(\"Traditional Chinese\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nagent = requests.utils.default_user_agent()\nUSER_AGENT = f\"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)\"\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, 
JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = f\"https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/\"\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = f\"https://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/\"\n MEDIA_FULL_URL = MEDIA_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{MEDIA_URL}\"\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py"}], "after_files": [{"content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.0.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"7f2343cf\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nDEFAULT_FROM_EMAIL = f\"admin@{DOMAIN}\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = 
env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.middleware.TimezoneMiddleware\",\n \"bookwyrm.middleware.IPBlocklistMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"fedireads\"),\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"PGPORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"German\")),\n (\"es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n (\"zh-hans\", _(\"Simplified Chinese\")),\n (\"zh-hant\", _(\"Traditional Chinese\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nagent = requests.utils.default_user_agent()\nUSER_AGENT = f\"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)\"\n\n# Imagekit 
generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = f\"https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/\"\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = f\"https://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/\"\n MEDIA_FULL_URL = MEDIA_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{MEDIA_URL}\"\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py"}]} | 2,879 | 123 |
gh_patches_debug_16992 | rasdani/github-patches | git_diff | getsentry__sentry-45065 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Alert Page broken after delete project
### Self-Hosted Version
22.8.0
### CPU Architecture
x86_64
### Docker Version
20.10.17, build 100c701
### Docker Compose Version
2.6.0
### Steps to Reproduce
1. create project
2. set a alert rule for it
3. delete this project
### Expected Result
Alert will be deleted too.
### Actual Result
The Alert page breaks after deleting a project.
It returns a 403; the detail says you don't have permission.
Manually fixed by:
```
\dt
select * from sentry_alertrule;
select * from sentry_alertruleactivity;
select * from sentry_alertruleexcludedprojects;
select * from sentry_alertruletrigger;
select * from sentry_alertruletriggeraction;
select * from sentry_alertruletriggerexclusion;
\dt
select * from sentry_rule;
delete from sentry_rule where id = 2;
select * from sentry_grouprulestatus;
delete from sentry_grouprulestatus where rule_id = 2;
delete from sentry_rule where id = 2;
select * from sentry_ruleactivity;
delete from sentry_ruleactivity where rule_id = 2;
delete from sentry_rule where id = 2;
delete from sentry_rulefirehistory where rule_id = 2;
delete from sentry_rule where id = 2;
select * from sentry_rule;
\dt
select * from sentry_userrole;
select * from sentry_userpermission;
select * from sentry_userrole_users;
select * from sentry_ruleactivity;
select * from sentry_rule;
select * from sentry_ruleactivity_id_seq;
select * from sentry_rulefirehistory;
select * from sentry_rulefirehistory_id_seq;
select * from sentry_rule_id_seq;
\q
```
--- END ISSUE ---
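For context on the 403 above: deleting a project in Sentry only marks it for deletion, and the alert-rule listing endpoint was still including those pending-deletion project ids in its permission check, which is what appears to have produced the "you don't have permission" response. A rough sketch of the kind of defensive filter involved, assuming Sentry's Django models (not necessarily the exact upstream change):

```python
# Sketch only: restrict the ids fed into the permission check to projects
# that are still visible, so soft-deleted projects cannot trigger a 403.
from sentry.models import Project, ProjectStatus


def visible_project_ids(user_team_list):
    return Project.objects.filter(
        teams__in=user_team_list,
        status=ProjectStatus.VISIBLE,
    ).values_list("id", flat=True)
```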
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/incidents/endpoints/organization_alert_rule_index.py`
Content:
```
1 from datetime import datetime
2
3 from django.db.models import DateTimeField, IntegerField, OuterRef, Q, Subquery, Value
4 from django.db.models.functions import Coalesce
5 from django.utils.timezone import make_aware
6 from rest_framework import status
7 from rest_framework.request import Request
8 from rest_framework.response import Response
9
10 from sentry import features
11 from sentry.api.base import region_silo_endpoint
12 from sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint
13 from sentry.api.exceptions import ResourceDoesNotExist
14 from sentry.api.paginator import (
15 CombinedQuerysetIntermediary,
16 CombinedQuerysetPaginator,
17 OffsetPaginator,
18 )
19 from sentry.api.serializers import serialize
20 from sentry.api.serializers.models.alert_rule import CombinedRuleSerializer
21 from sentry.api.utils import InvalidParams
22 from sentry.incidents.models import AlertRule, Incident
23 from sentry.incidents.serializers import AlertRuleSerializer
24 from sentry.models import OrganizationMemberTeam, Project, ProjectStatus, Rule, RuleStatus, Team
25 from sentry.snuba.dataset import Dataset
26 from sentry.utils.cursors import Cursor, StringCursor
27
28 from .utils import parse_team_params
29
30
31 @region_silo_endpoint
32 class OrganizationCombinedRuleIndexEndpoint(OrganizationEndpoint):
33 def get(self, request: Request, organization) -> Response:
34 """
35 Fetches alert rules and legacy rules for an organization
36 """
37 project_ids = self.get_requested_project_ids_unchecked(request) or None
38 if project_ids == {-1}: # All projects for org:
39 project_ids = Project.objects.filter(
40 organization=organization, status=ProjectStatus.VISIBLE
41 ).values_list("id", flat=True)
42 elif project_ids is None: # All projects for user
43 org_team_list = Team.objects.filter(organization=organization).values_list(
44 "id", flat=True
45 )
46 user_team_list = OrganizationMemberTeam.objects.filter(
47 organizationmember__user=request.user, team__in=org_team_list
48 ).values_list("team", flat=True)
49 project_ids = Project.objects.filter(teams__in=user_team_list).values_list(
50 "id", flat=True
51 )
52
53 # Materialize the project ids here. This helps us to not overwhelm the query planner with
54 # overcomplicated subqueries. Previously, this was causing Postgres to use a suboptimal
55 # index to filter on. Also enforces permission checks.
56 projects = self.get_projects(request, organization, project_ids=set(project_ids))
57
58 teams = request.GET.getlist("team", [])
59 team_filter_query = None
60 if len(teams) > 0:
61 try:
62 teams_query, unassigned = parse_team_params(request, organization, teams)
63 except InvalidParams as err:
64 return Response(str(err), status=status.HTTP_400_BAD_REQUEST)
65
66 team_filter_query = Q(owner_id__in=teams_query.values_list("actor_id", flat=True))
67 if unassigned:
68 team_filter_query = team_filter_query | Q(owner_id=None)
69
70 alert_rules = AlertRule.objects.fetch_for_organization(organization, projects)
71 if not features.has("organizations:performance-view", organization):
72 # Filter to only error alert rules
73 alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)
74 issue_rules = Rule.objects.filter(
75 status__in=[RuleStatus.ACTIVE, RuleStatus.INACTIVE], project__in=projects
76 )
77 name = request.GET.get("name", None)
78 if name:
79 alert_rules = alert_rules.filter(Q(name__icontains=name))
80 issue_rules = issue_rules.filter(Q(label__icontains=name))
81
82 if team_filter_query:
83 alert_rules = alert_rules.filter(team_filter_query)
84 issue_rules = issue_rules.filter(team_filter_query)
85
86 expand = request.GET.getlist("expand", [])
87 if "latestIncident" in expand:
88 alert_rules = alert_rules.annotate(
89 incident_id=Coalesce(
90 Subquery(
91 Incident.objects.filter(alert_rule=OuterRef("pk"))
92 .order_by("-date_started")
93 .values("id")[:1]
94 ),
95 Value("-1"),
96 )
97 )
98
99 is_asc = request.GET.get("asc", False) == "1"
100 sort_key = request.GET.getlist("sort", ["date_added"])
101 rule_sort_key = [
102 "label" if x == "name" else x for x in sort_key
103 ] # Rule's don't share the same field name for their title/label/name...so we account for that here.
104 case_insensitive = sort_key == ["name"]
105
106 if "incident_status" in sort_key:
107 alert_rules = alert_rules.annotate(
108 incident_status=Coalesce(
109 Subquery(
110 Incident.objects.filter(alert_rule=OuterRef("pk"))
111 .order_by("-date_started")
112 .values("status")[:1]
113 ),
114 Value(-1, output_field=IntegerField()),
115 )
116 )
117 issue_rules = issue_rules.annotate(
118 incident_status=Value(-2, output_field=IntegerField())
119 )
120
121 if "date_triggered" in sort_key:
122 far_past_date = Value(make_aware(datetime.min), output_field=DateTimeField())
123 alert_rules = alert_rules.annotate(
124 date_triggered=Coalesce(
125 Subquery(
126 Incident.objects.filter(alert_rule=OuterRef("pk"))
127 .order_by("-date_started")
128 .values("date_started")[:1]
129 ),
130 far_past_date,
131 ),
132 )
133 issue_rules = issue_rules.annotate(date_triggered=far_past_date)
134 alert_rules_count = alert_rules.count()
135 issue_rules_count = issue_rules.count()
136 alert_rule_intermediary = CombinedQuerysetIntermediary(alert_rules, sort_key)
137 rule_intermediary = CombinedQuerysetIntermediary(issue_rules, rule_sort_key)
138 response = self.paginate(
139 request,
140 paginator_cls=CombinedQuerysetPaginator,
141 on_results=lambda x: serialize(x, request.user, CombinedRuleSerializer(expand=expand)),
142 default_per_page=25,
143 intermediaries=[alert_rule_intermediary, rule_intermediary],
144 desc=not is_asc,
145 cursor_cls=StringCursor if case_insensitive else Cursor,
146 case_insensitive=case_insensitive,
147 )
148 response["X-Sentry-Issue-Rule-Hits"] = issue_rules_count
149 response["X-Sentry-Alert-Rule-Hits"] = alert_rules_count
150 return response
151
152
153 @region_silo_endpoint
154 class OrganizationAlertRuleIndexEndpoint(OrganizationEndpoint):
155 permission_classes = (OrganizationAlertRulePermission,)
156
157 def get(self, request: Request, organization) -> Response:
158 """
159 Fetches alert rules for an organization
160 """
161 if not features.has("organizations:incidents", organization, actor=request.user):
162 raise ResourceDoesNotExist
163
164 projects = self.get_projects(request, organization)
165 alert_rules = AlertRule.objects.fetch_for_organization(organization, projects)
166 if not features.has("organizations:performance-view", organization):
167 # Filter to only error alert rules
168 alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)
169
170 return self.paginate(
171 request,
172 queryset=alert_rules,
173 order_by="-date_added",
174 paginator_cls=OffsetPaginator,
175 on_results=lambda x: serialize(x, request.user),
176 default_per_page=25,
177 )
178
179 def post(self, request: Request, organization) -> Response:
180 """
181 Create an alert rule
182 """
183
184 if not features.has("organizations:incidents", organization, actor=request.user):
185 raise ResourceDoesNotExist
186
187 serializer = AlertRuleSerializer(
188 context={"organization": organization, "access": request.access, "user": request.user},
189 data=request.data,
190 )
191
192 if serializer.is_valid():
193 alert_rule = serializer.save()
194 return Response(serialize(alert_rule, request.user), status=status.HTTP_201_CREATED)
195
196 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/incidents/endpoints/organization_alert_rule_index.py b/src/sentry/incidents/endpoints/organization_alert_rule_index.py
--- a/src/sentry/incidents/endpoints/organization_alert_rule_index.py
+++ b/src/sentry/incidents/endpoints/organization_alert_rule_index.py
@@ -46,9 +46,9 @@
user_team_list = OrganizationMemberTeam.objects.filter(
organizationmember__user=request.user, team__in=org_team_list
).values_list("team", flat=True)
- project_ids = Project.objects.filter(teams__in=user_team_list).values_list(
- "id", flat=True
- )
+ project_ids = Project.objects.filter(
+ teams__in=user_team_list, status=ProjectStatus.VISIBLE
+ ).values_list("id", flat=True)
# Materialize the project ids here. This helps us to not overwhelm the query planner with
# overcomplicated subqueries. Previously, this was causing Postgres to use a suboptimal
| {"golden_diff": "diff --git a/src/sentry/incidents/endpoints/organization_alert_rule_index.py b/src/sentry/incidents/endpoints/organization_alert_rule_index.py\n--- a/src/sentry/incidents/endpoints/organization_alert_rule_index.py\n+++ b/src/sentry/incidents/endpoints/organization_alert_rule_index.py\n@@ -46,9 +46,9 @@\n user_team_list = OrganizationMemberTeam.objects.filter(\n organizationmember__user=request.user, team__in=org_team_list\n ).values_list(\"team\", flat=True)\n- project_ids = Project.objects.filter(teams__in=user_team_list).values_list(\n- \"id\", flat=True\n- )\n+ project_ids = Project.objects.filter(\n+ teams__in=user_team_list, status=ProjectStatus.VISIBLE\n+ ).values_list(\"id\", flat=True)\n \n # Materialize the project ids here. This helps us to not overwhelm the query planner with\n # overcomplicated subqueries. Previously, this was causing Postgres to use a suboptimal\n", "issue": "Alert Page broken after delete project \n### Self-Hosted Version\n\n22.8.0\n\n### CPU Architecture\n\nx86_64\n\n### Docker Version\n\n20.10.17, build 100c701\n\n### Docker Compose Version\n\n2.6.0\n\n### Steps to Reproduce\n\n1. create project\r\n2. set a alert rule for it\r\n3. delete this project\n\n### Expected Result\n\nAlert will be deleted too.\n\n### Actual Result\n\nAlert Page broken after delete project.\r\nReturns 403, detail is you don't have permission.\r\nManually fixed by:\r\n```\r\n\\dt\r\nselect * from sentry_alertrule;\r\nselect * from sentry_alertruleactivity;\r\nselect * from sentry_alertruleexcludedprojects;\r\nselect * from sentry_alertruletrigger;\r\nselect * from sentry_alertruletriggeraction;\r\nselect * from sentry_alertruletriggerexclusion;\r\n\\dt\r\nselect * from sentry_rule;\r\ndelete from sentry_rule where id = 2;\r\nselect * from sentry_grouprulestatus;\r\ndelete from sentry_grouprulestatus where rule_id = 2;\r\ndelete from sentry_rule where id = 2;\r\nselect * from sentry_ruleactivity;\r\ndelete from sentry_ruleactivity where rule_id = 2;\r\ndelete from sentry_rule where id = 2;\r\ndelete from sentry_rulefirehistory where rule_id = 2;\r\ndelete from sentry_rule where id = 2;\r\nselect * from sentry_rule;\r\n\\dt\r\nselect * from sentry_userrole;\r\nselect * from sentry_userpermission;\r\nselect * from sentry_userrole_users;\r\nselect * from sentry_ruleactivity;\r\nselect * from sentry_rule;\r\nselect * from sentry_ruleactivity_id_seq;\r\nselect * from sentry_rulefirehistory;\r\nselect * from sentry_rulefirehistory_id_seq;\r\nselect * from sentry_rule_id_seq;\r\n\\q\r\n\r\n```\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.db.models import DateTimeField, IntegerField, OuterRef, Q, Subquery, Value\nfrom django.db.models.functions import Coalesce\nfrom django.utils.timezone import make_aware\nfrom rest_framework import status\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import features\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.paginator import (\n CombinedQuerysetIntermediary,\n CombinedQuerysetPaginator,\n OffsetPaginator,\n)\nfrom sentry.api.serializers import serialize\nfrom sentry.api.serializers.models.alert_rule import CombinedRuleSerializer\nfrom sentry.api.utils import InvalidParams\nfrom sentry.incidents.models import AlertRule, Incident\nfrom sentry.incidents.serializers import 
AlertRuleSerializer\nfrom sentry.models import OrganizationMemberTeam, Project, ProjectStatus, Rule, RuleStatus, Team\nfrom sentry.snuba.dataset import Dataset\nfrom sentry.utils.cursors import Cursor, StringCursor\n\nfrom .utils import parse_team_params\n\n\n@region_silo_endpoint\nclass OrganizationCombinedRuleIndexEndpoint(OrganizationEndpoint):\n def get(self, request: Request, organization) -> Response:\n \"\"\"\n Fetches alert rules and legacy rules for an organization\n \"\"\"\n project_ids = self.get_requested_project_ids_unchecked(request) or None\n if project_ids == {-1}: # All projects for org:\n project_ids = Project.objects.filter(\n organization=organization, status=ProjectStatus.VISIBLE\n ).values_list(\"id\", flat=True)\n elif project_ids is None: # All projects for user\n org_team_list = Team.objects.filter(organization=organization).values_list(\n \"id\", flat=True\n )\n user_team_list = OrganizationMemberTeam.objects.filter(\n organizationmember__user=request.user, team__in=org_team_list\n ).values_list(\"team\", flat=True)\n project_ids = Project.objects.filter(teams__in=user_team_list).values_list(\n \"id\", flat=True\n )\n\n # Materialize the project ids here. This helps us to not overwhelm the query planner with\n # overcomplicated subqueries. Previously, this was causing Postgres to use a suboptimal\n # index to filter on. Also enforces permission checks.\n projects = self.get_projects(request, organization, project_ids=set(project_ids))\n\n teams = request.GET.getlist(\"team\", [])\n team_filter_query = None\n if len(teams) > 0:\n try:\n teams_query, unassigned = parse_team_params(request, organization, teams)\n except InvalidParams as err:\n return Response(str(err), status=status.HTTP_400_BAD_REQUEST)\n\n team_filter_query = Q(owner_id__in=teams_query.values_list(\"actor_id\", flat=True))\n if unassigned:\n team_filter_query = team_filter_query | Q(owner_id=None)\n\n alert_rules = AlertRule.objects.fetch_for_organization(organization, projects)\n if not features.has(\"organizations:performance-view\", organization):\n # Filter to only error alert rules\n alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)\n issue_rules = Rule.objects.filter(\n status__in=[RuleStatus.ACTIVE, RuleStatus.INACTIVE], project__in=projects\n )\n name = request.GET.get(\"name\", None)\n if name:\n alert_rules = alert_rules.filter(Q(name__icontains=name))\n issue_rules = issue_rules.filter(Q(label__icontains=name))\n\n if team_filter_query:\n alert_rules = alert_rules.filter(team_filter_query)\n issue_rules = issue_rules.filter(team_filter_query)\n\n expand = request.GET.getlist(\"expand\", [])\n if \"latestIncident\" in expand:\n alert_rules = alert_rules.annotate(\n incident_id=Coalesce(\n Subquery(\n Incident.objects.filter(alert_rule=OuterRef(\"pk\"))\n .order_by(\"-date_started\")\n .values(\"id\")[:1]\n ),\n Value(\"-1\"),\n )\n )\n\n is_asc = request.GET.get(\"asc\", False) == \"1\"\n sort_key = request.GET.getlist(\"sort\", [\"date_added\"])\n rule_sort_key = [\n \"label\" if x == \"name\" else x for x in sort_key\n ] # Rule's don't share the same field name for their title/label/name...so we account for that here.\n case_insensitive = sort_key == [\"name\"]\n\n if \"incident_status\" in sort_key:\n alert_rules = alert_rules.annotate(\n incident_status=Coalesce(\n Subquery(\n Incident.objects.filter(alert_rule=OuterRef(\"pk\"))\n .order_by(\"-date_started\")\n .values(\"status\")[:1]\n ),\n Value(-1, output_field=IntegerField()),\n )\n )\n issue_rules = 
issue_rules.annotate(\n incident_status=Value(-2, output_field=IntegerField())\n )\n\n if \"date_triggered\" in sort_key:\n far_past_date = Value(make_aware(datetime.min), output_field=DateTimeField())\n alert_rules = alert_rules.annotate(\n date_triggered=Coalesce(\n Subquery(\n Incident.objects.filter(alert_rule=OuterRef(\"pk\"))\n .order_by(\"-date_started\")\n .values(\"date_started\")[:1]\n ),\n far_past_date,\n ),\n )\n issue_rules = issue_rules.annotate(date_triggered=far_past_date)\n alert_rules_count = alert_rules.count()\n issue_rules_count = issue_rules.count()\n alert_rule_intermediary = CombinedQuerysetIntermediary(alert_rules, sort_key)\n rule_intermediary = CombinedQuerysetIntermediary(issue_rules, rule_sort_key)\n response = self.paginate(\n request,\n paginator_cls=CombinedQuerysetPaginator,\n on_results=lambda x: serialize(x, request.user, CombinedRuleSerializer(expand=expand)),\n default_per_page=25,\n intermediaries=[alert_rule_intermediary, rule_intermediary],\n desc=not is_asc,\n cursor_cls=StringCursor if case_insensitive else Cursor,\n case_insensitive=case_insensitive,\n )\n response[\"X-Sentry-Issue-Rule-Hits\"] = issue_rules_count\n response[\"X-Sentry-Alert-Rule-Hits\"] = alert_rules_count\n return response\n\n\n@region_silo_endpoint\nclass OrganizationAlertRuleIndexEndpoint(OrganizationEndpoint):\n permission_classes = (OrganizationAlertRulePermission,)\n\n def get(self, request: Request, organization) -> Response:\n \"\"\"\n Fetches alert rules for an organization\n \"\"\"\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n projects = self.get_projects(request, organization)\n alert_rules = AlertRule.objects.fetch_for_organization(organization, projects)\n if not features.has(\"organizations:performance-view\", organization):\n # Filter to only error alert rules\n alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)\n\n return self.paginate(\n request,\n queryset=alert_rules,\n order_by=\"-date_added\",\n paginator_cls=OffsetPaginator,\n on_results=lambda x: serialize(x, request.user),\n default_per_page=25,\n )\n\n def post(self, request: Request, organization) -> Response:\n \"\"\"\n Create an alert rule\n \"\"\"\n\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n serializer = AlertRuleSerializer(\n context={\"organization\": organization, \"access\": request.access, \"user\": request.user},\n data=request.data,\n )\n\n if serializer.is_valid():\n alert_rule = serializer.save()\n return Response(serialize(alert_rule, request.user), status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n", "path": "src/sentry/incidents/endpoints/organization_alert_rule_index.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom django.db.models import DateTimeField, IntegerField, OuterRef, Q, Subquery, Value\nfrom django.db.models.functions import Coalesce\nfrom django.utils.timezone import make_aware\nfrom rest_framework import status\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import features\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.paginator import (\n CombinedQuerysetIntermediary,\n 
CombinedQuerysetPaginator,\n OffsetPaginator,\n)\nfrom sentry.api.serializers import serialize\nfrom sentry.api.serializers.models.alert_rule import CombinedRuleSerializer\nfrom sentry.api.utils import InvalidParams\nfrom sentry.incidents.models import AlertRule, Incident\nfrom sentry.incidents.serializers import AlertRuleSerializer\nfrom sentry.models import OrganizationMemberTeam, Project, ProjectStatus, Rule, RuleStatus, Team\nfrom sentry.snuba.dataset import Dataset\nfrom sentry.utils.cursors import Cursor, StringCursor\n\nfrom .utils import parse_team_params\n\n\n@region_silo_endpoint\nclass OrganizationCombinedRuleIndexEndpoint(OrganizationEndpoint):\n def get(self, request: Request, organization) -> Response:\n \"\"\"\n Fetches alert rules and legacy rules for an organization\n \"\"\"\n project_ids = self.get_requested_project_ids_unchecked(request) or None\n if project_ids == {-1}: # All projects for org:\n project_ids = Project.objects.filter(\n organization=organization, status=ProjectStatus.VISIBLE\n ).values_list(\"id\", flat=True)\n elif project_ids is None: # All projects for user\n org_team_list = Team.objects.filter(organization=organization).values_list(\n \"id\", flat=True\n )\n user_team_list = OrganizationMemberTeam.objects.filter(\n organizationmember__user=request.user, team__in=org_team_list\n ).values_list(\"team\", flat=True)\n project_ids = Project.objects.filter(\n teams__in=user_team_list, status=ProjectStatus.VISIBLE\n ).values_list(\"id\", flat=True)\n\n # Materialize the project ids here. This helps us to not overwhelm the query planner with\n # overcomplicated subqueries. Previously, this was causing Postgres to use a suboptimal\n # index to filter on. Also enforces permission checks.\n projects = self.get_projects(request, organization, project_ids=set(project_ids))\n\n teams = request.GET.getlist(\"team\", [])\n team_filter_query = None\n if len(teams) > 0:\n try:\n teams_query, unassigned = parse_team_params(request, organization, teams)\n except InvalidParams as err:\n return Response(str(err), status=status.HTTP_400_BAD_REQUEST)\n\n team_filter_query = Q(owner_id__in=teams_query.values_list(\"actor_id\", flat=True))\n if unassigned:\n team_filter_query = team_filter_query | Q(owner_id=None)\n\n alert_rules = AlertRule.objects.fetch_for_organization(organization, projects)\n if not features.has(\"organizations:performance-view\", organization):\n # Filter to only error alert rules\n alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)\n issue_rules = Rule.objects.filter(\n status__in=[RuleStatus.ACTIVE, RuleStatus.INACTIVE], project__in=projects\n )\n name = request.GET.get(\"name\", None)\n if name:\n alert_rules = alert_rules.filter(Q(name__icontains=name))\n issue_rules = issue_rules.filter(Q(label__icontains=name))\n\n if team_filter_query:\n alert_rules = alert_rules.filter(team_filter_query)\n issue_rules = issue_rules.filter(team_filter_query)\n\n expand = request.GET.getlist(\"expand\", [])\n if \"latestIncident\" in expand:\n alert_rules = alert_rules.annotate(\n incident_id=Coalesce(\n Subquery(\n Incident.objects.filter(alert_rule=OuterRef(\"pk\"))\n .order_by(\"-date_started\")\n .values(\"id\")[:1]\n ),\n Value(\"-1\"),\n )\n )\n\n is_asc = request.GET.get(\"asc\", False) == \"1\"\n sort_key = request.GET.getlist(\"sort\", [\"date_added\"])\n rule_sort_key = [\n \"label\" if x == \"name\" else x for x in sort_key\n ] # Rule's don't share the same field name for their title/label/name...so we account for that here.\n 
case_insensitive = sort_key == [\"name\"]\n\n if \"incident_status\" in sort_key:\n alert_rules = alert_rules.annotate(\n incident_status=Coalesce(\n Subquery(\n Incident.objects.filter(alert_rule=OuterRef(\"pk\"))\n .order_by(\"-date_started\")\n .values(\"status\")[:1]\n ),\n Value(-1, output_field=IntegerField()),\n )\n )\n issue_rules = issue_rules.annotate(\n incident_status=Value(-2, output_field=IntegerField())\n )\n\n if \"date_triggered\" in sort_key:\n far_past_date = Value(make_aware(datetime.min), output_field=DateTimeField())\n alert_rules = alert_rules.annotate(\n date_triggered=Coalesce(\n Subquery(\n Incident.objects.filter(alert_rule=OuterRef(\"pk\"))\n .order_by(\"-date_started\")\n .values(\"date_started\")[:1]\n ),\n far_past_date,\n ),\n )\n issue_rules = issue_rules.annotate(date_triggered=far_past_date)\n alert_rules_count = alert_rules.count()\n issue_rules_count = issue_rules.count()\n alert_rule_intermediary = CombinedQuerysetIntermediary(alert_rules, sort_key)\n rule_intermediary = CombinedQuerysetIntermediary(issue_rules, rule_sort_key)\n response = self.paginate(\n request,\n paginator_cls=CombinedQuerysetPaginator,\n on_results=lambda x: serialize(x, request.user, CombinedRuleSerializer(expand=expand)),\n default_per_page=25,\n intermediaries=[alert_rule_intermediary, rule_intermediary],\n desc=not is_asc,\n cursor_cls=StringCursor if case_insensitive else Cursor,\n case_insensitive=case_insensitive,\n )\n response[\"X-Sentry-Issue-Rule-Hits\"] = issue_rules_count\n response[\"X-Sentry-Alert-Rule-Hits\"] = alert_rules_count\n return response\n\n\n@region_silo_endpoint\nclass OrganizationAlertRuleIndexEndpoint(OrganizationEndpoint):\n permission_classes = (OrganizationAlertRulePermission,)\n\n def get(self, request: Request, organization) -> Response:\n \"\"\"\n Fetches alert rules for an organization\n \"\"\"\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n projects = self.get_projects(request, organization)\n alert_rules = AlertRule.objects.fetch_for_organization(organization, projects)\n if not features.has(\"organizations:performance-view\", organization):\n # Filter to only error alert rules\n alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)\n\n return self.paginate(\n request,\n queryset=alert_rules,\n order_by=\"-date_added\",\n paginator_cls=OffsetPaginator,\n on_results=lambda x: serialize(x, request.user),\n default_per_page=25,\n )\n\n def post(self, request: Request, organization) -> Response:\n \"\"\"\n Create an alert rule\n \"\"\"\n\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n serializer = AlertRuleSerializer(\n context={\"organization\": organization, \"access\": request.access, \"user\": request.user},\n data=request.data,\n )\n\n if serializer.is_valid():\n alert_rule = serializer.save()\n return Response(serialize(alert_rule, request.user), status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n", "path": "src/sentry/incidents/endpoints/organization_alert_rule_index.py"}]} | 2,856 | 221 |
gh_patches_debug_5025 | rasdani/github-patches | git_diff | tobymao__sqlglot-2095 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to parse the oracle query when "#" is present in the column names
Hello,
The parser throws an error when a "#" character is present in the column name.
**Code for your reference -**
```
import sqlglot
import sqlglot.expressions as exp

query = """
SELECT A.COL#, B.COL2 FROM SCHEMA1.TABLE1 A
"""
expressions = sqlglot.parse_one(query, read='oracle')
x = list(expressions.find_all(exp.Table))
for i in x:
listTables = str(i).split(' ')[0]
print(listTables)
```
The above code fails because the parser cannot process '#' and throws an error.
Please add support for '#' in query parsing.
--- END ISSUE ---
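Until the Oracle dialect treats `#` as a legal identifier character, one workaround is to quote the column name: Oracle itself accepts `"COL#"` as a quoted identifier, and sqlglot parses quoted identifiers without touching the `#`. A quick illustrative check (exact output may vary by sqlglot version):

```python
import sqlglot
import sqlglot.expressions as exp

# The '#' survives tokenization when the identifier is double-quoted.
tree = sqlglot.parse_one('SELECT A."COL#", B.COL2 FROM SCHEMA1.TABLE1 A', read="oracle")

for table in tree.find_all(exp.Table):
    print(table.sql(dialect="oracle"))  # e.g. SCHEMA1.TABLE1 A
```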
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/dialects/oracle.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, generator, parser, tokens, transforms
6 from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
7 from sqlglot.helper import seq_get
8 from sqlglot.tokens import TokenType
9
10
11 def _parse_xml_table(self: parser.Parser) -> exp.XMLTable:
12 this = self._parse_string()
13
14 passing = None
15 columns = None
16
17 if self._match_text_seq("PASSING"):
18 # The BY VALUE keywords are optional and are provided for semantic clarity
19 self._match_text_seq("BY", "VALUE")
20 passing = self._parse_csv(self._parse_column)
21
22 by_ref = self._match_text_seq("RETURNING", "SEQUENCE", "BY", "REF")
23
24 if self._match_text_seq("COLUMNS"):
25 columns = self._parse_csv(self._parse_field_def)
26
27 return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)
28
29
30 class Oracle(Dialect):
31 ALIAS_POST_TABLESAMPLE = True
32
33 # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm
34 RESOLVES_IDENTIFIERS_AS_UPPERCASE = True
35
36 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
37 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
38 TIME_MAPPING = {
39 "AM": "%p", # Meridian indicator with or without periods
40 "A.M.": "%p", # Meridian indicator with or without periods
41 "PM": "%p", # Meridian indicator with or without periods
42 "P.M.": "%p", # Meridian indicator with or without periods
43 "D": "%u", # Day of week (1-7)
44 "DAY": "%A", # name of day
45 "DD": "%d", # day of month (1-31)
46 "DDD": "%j", # day of year (1-366)
47 "DY": "%a", # abbreviated name of day
48 "HH": "%I", # Hour of day (1-12)
49 "HH12": "%I", # alias for HH
50 "HH24": "%H", # Hour of day (0-23)
51 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
52 "MI": "%M", # Minute (0-59)
53 "MM": "%m", # Month (01-12; January = 01)
54 "MON": "%b", # Abbreviated name of month
55 "MONTH": "%B", # Name of month
56 "SS": "%S", # Second (0-59)
57 "WW": "%W", # Week of year (1-53)
58 "YY": "%y", # 15
59 "YYYY": "%Y", # 2015
60 }
61
62 class Parser(parser.Parser):
63 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
64
65 FUNCTIONS = {
66 **parser.Parser.FUNCTIONS,
67 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
68 }
69
70 FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
71 **parser.Parser.FUNCTION_PARSERS,
72 "XMLTABLE": _parse_xml_table,
73 }
74
75 TYPE_LITERAL_PARSERS = {
76 exp.DataType.Type.DATE: lambda self, this, _: self.expression(
77 exp.DateStrToDate, this=this
78 )
79 }
80
81 # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT ..
82 # Reference: https://stackoverflow.com/a/336455
83 DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}
84
85 def _parse_column(self) -> t.Optional[exp.Expression]:
86 column = super()._parse_column()
87 if column:
88 column.set("join_mark", self._match(TokenType.JOIN_MARKER))
89 return column
90
91 def _parse_hint(self) -> t.Optional[exp.Hint]:
92 if self._match(TokenType.HINT):
93 start = self._curr
94 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
95 self._advance()
96
97 if not self._curr:
98 self.raise_error("Expected */ after HINT")
99
100 end = self._tokens[self._index - 3]
101 return exp.Hint(expressions=[self._find_sql(start, end)])
102
103 return None
104
105 class Generator(generator.Generator):
106 LOCKING_READS_SUPPORTED = True
107 JOIN_HINTS = False
108 TABLE_HINTS = False
109 COLUMN_JOIN_MARKS_SUPPORTED = True
110
111 LIMIT_FETCH = "FETCH"
112
113 TYPE_MAPPING = {
114 **generator.Generator.TYPE_MAPPING,
115 exp.DataType.Type.TINYINT: "NUMBER",
116 exp.DataType.Type.SMALLINT: "NUMBER",
117 exp.DataType.Type.INT: "NUMBER",
118 exp.DataType.Type.BIGINT: "NUMBER",
119 exp.DataType.Type.DECIMAL: "NUMBER",
120 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
121 exp.DataType.Type.VARCHAR: "VARCHAR2",
122 exp.DataType.Type.NVARCHAR: "NVARCHAR2",
123 exp.DataType.Type.NCHAR: "NCHAR",
124 exp.DataType.Type.TEXT: "CLOB",
125 exp.DataType.Type.BINARY: "BLOB",
126 exp.DataType.Type.VARBINARY: "BLOB",
127 }
128
129 TRANSFORMS = {
130 **generator.Generator.TRANSFORMS,
131 exp.DateStrToDate: lambda self, e: self.func(
132 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
133 ),
134 exp.Group: transforms.preprocess([transforms.unalias_group]),
135 exp.ILike: no_ilike_sql,
136 exp.Coalesce: rename_func("NVL"),
137 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
138 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
139 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
140 exp.Substring: rename_func("SUBSTR"),
141 exp.Table: lambda self, e: self.table_sql(e, sep=" "),
142 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
143 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
144 exp.ToChar: lambda self, e: self.function_fallback_sql(e),
145 exp.Trim: trim_sql,
146 exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
147 }
148
149 PROPERTIES_LOCATION = {
150 **generator.Generator.PROPERTIES_LOCATION,
151 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
152 }
153
154 def offset_sql(self, expression: exp.Offset) -> str:
155 return f"{super().offset_sql(expression)} ROWS"
156
157 def xmltable_sql(self, expression: exp.XMLTable) -> str:
158 this = self.sql(expression, "this")
159 passing = self.expressions(expression, key="passing")
160 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
161 columns = self.expressions(expression, key="columns")
162 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
163 by_ref = (
164 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
165 )
166 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
167
168 class Tokenizer(tokens.Tokenizer):
169 VAR_SINGLE_TOKENS = {"@"}
170
171 KEYWORDS = {
172 **tokens.Tokenizer.KEYWORDS,
173 "(+)": TokenType.JOIN_MARKER,
174 "BINARY_DOUBLE": TokenType.DOUBLE,
175 "BINARY_FLOAT": TokenType.FLOAT,
176 "COLUMNS": TokenType.COLUMN,
177 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
178 "MINUS": TokenType.EXCEPT,
179 "NVARCHAR2": TokenType.NVARCHAR,
180 "SAMPLE": TokenType.TABLE_SAMPLE,
181 "START": TokenType.BEGIN,
182 "TOP": TokenType.TOP,
183 "VARCHAR2": TokenType.VARCHAR,
184 }
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -166,7 +166,7 @@
return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
class Tokenizer(tokens.Tokenizer):
- VAR_SINGLE_TOKENS = {"@"}
+ VAR_SINGLE_TOKENS = {"@", "$", "#"}
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
| {"golden_diff": "diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py\n--- a/sqlglot/dialects/oracle.py\n+++ b/sqlglot/dialects/oracle.py\n@@ -166,7 +166,7 @@\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n \n class Tokenizer(tokens.Tokenizer):\n- VAR_SINGLE_TOKENS = {\"@\"}\n+ VAR_SINGLE_TOKENS = {\"@\", \"$\", \"#\"}\n \n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n", "issue": "Unable to parse the oracle query when \"#\" is present in the column names\nHello,\r\n\r\nParser is throwing an error when there is \"#\" character is present in the column name.\r\n\r\n**Code for your reference -**\r\n\r\n```\r\nimport sqlglot.expressions as exp\r\n\r\nquery =\"\"\"\r\nSELECT A.COL#, B.COL2 FROM SCHEMA1.TABLE1 A\r\n\"\"\"\r\n\r\nexpressions = sqlglot.parse_one(query, read='oracle')\r\nx = list(expressions.find_all(exp.Table))\r\n\r\nfor i in x:\r\n listTables = str(i).split(' ')[0]\r\n print(listTables)\r\n```\r\n\r\n\r\nAbove code fails as it cannot process '#' and throws the error.\r\nRequest you to add support for '#' for query parsing.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _parse_xml_table(self: parser.Parser) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(self._parse_column)\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(self._parse_field_def)\n\n return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)\n\n\nclass Oracle(Dialect):\n ALIAS_POST_TABLESAMPLE = True\n\n # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm\n RESOLVES_IDENTIFIERS_AS_UPPERCASE = True\n\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n TIME_MAPPING = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n \"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"SQUARE\": 
lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n \"XMLTABLE\": _parse_xml_table,\n }\n\n TYPE_LITERAL_PARSERS = {\n exp.DataType.Type.DATE: lambda self, this, _: self.expression(\n exp.DateStrToDate, this=this\n )\n }\n\n # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT ..\n # Reference: https://stackoverflow.com/a/336455\n DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n def _parse_hint(self) -> t.Optional[exp.Hint]:\n if self._match(TokenType.HINT):\n start = self._curr\n while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n self._advance()\n\n if not self._curr:\n self.raise_error(\"Expected */ after HINT\")\n\n end = self._tokens[self._index - 3]\n return exp.Hint(expressions=[self._find_sql(start, end)])\n\n return None\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n JOIN_HINTS = False\n TABLE_HINTS = False\n COLUMN_JOIN_MARKS_SUPPORTED = True\n\n LIMIT_FETCH = \"FETCH\"\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.NCHAR: \"NCHAR\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.DateStrToDate: lambda self, e: self.func(\n \"TO_DATE\", e.this, exp.Literal.string(\"YYYY-MM-DD\")\n ),\n exp.Group: transforms.preprocess([transforms.unalias_group]),\n exp.ILike: no_ilike_sql,\n exp.Coalesce: rename_func(\"NVL\"),\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: trim_sql,\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, \"this\")\n passing = self.expressions(expression, key=\"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, key=\"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n f\"{self.sep()}RETURNING SEQUENCE BY REF\" if expression.args.get(\"by_ref\") else \"\"\n )\n 
return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n class Tokenizer(tokens.Tokenizer):\n VAR_SINGLE_TOKENS = {\"@\"}\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n \"BINARY_DOUBLE\": TokenType.DOUBLE,\n \"BINARY_FLOAT\": TokenType.FLOAT,\n \"COLUMNS\": TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"SAMPLE\": TokenType.TABLE_SAMPLE,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _parse_xml_table(self: parser.Parser) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(self._parse_column)\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(self._parse_field_def)\n\n return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)\n\n\nclass Oracle(Dialect):\n ALIAS_POST_TABLESAMPLE = True\n\n # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm\n RESOLVES_IDENTIFIERS_AS_UPPERCASE = True\n\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n TIME_MAPPING = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n \"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n \"XMLTABLE\": _parse_xml_table,\n }\n\n TYPE_LITERAL_PARSERS = {\n exp.DataType.Type.DATE: lambda self, this, _: self.expression(\n exp.DateStrToDate, this=this\n )\n }\n\n # SELECT UNIQUE .. 
is old-style Oracle syntax for SELECT DISTINCT ..\n # Reference: https://stackoverflow.com/a/336455\n DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n def _parse_hint(self) -> t.Optional[exp.Hint]:\n if self._match(TokenType.HINT):\n start = self._curr\n while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n self._advance()\n\n if not self._curr:\n self.raise_error(\"Expected */ after HINT\")\n\n end = self._tokens[self._index - 3]\n return exp.Hint(expressions=[self._find_sql(start, end)])\n\n return None\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n JOIN_HINTS = False\n TABLE_HINTS = False\n COLUMN_JOIN_MARKS_SUPPORTED = True\n\n LIMIT_FETCH = \"FETCH\"\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.NCHAR: \"NCHAR\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.DateStrToDate: lambda self, e: self.func(\n \"TO_DATE\", e.this, exp.Literal.string(\"YYYY-MM-DD\")\n ),\n exp.Group: transforms.preprocess([transforms.unalias_group]),\n exp.ILike: no_ilike_sql,\n exp.Coalesce: rename_func(\"NVL\"),\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: trim_sql,\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, \"this\")\n passing = self.expressions(expression, key=\"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, key=\"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n f\"{self.sep()}RETURNING SEQUENCE BY REF\" if expression.args.get(\"by_ref\") else \"\"\n )\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n class Tokenizer(tokens.Tokenizer):\n VAR_SINGLE_TOKENS = {\"@\", \"$\", \"#\"}\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n \"BINARY_DOUBLE\": TokenType.DOUBLE,\n \"BINARY_FLOAT\": TokenType.FLOAT,\n \"COLUMNS\": 
TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"SAMPLE\": TokenType.TABLE_SAMPLE,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}]} | 2,789 | 140 |
gh_patches_debug_21422 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2405 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Metric Reader function not being called
As reported by @aabmass :v:
There is a bug [here](https://github.com/open-telemetry/opentelemetry-python/blob/main/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py#L204). If a metric reader returns `False` in a shutdown, the next metric reader shutdown function won't be called.
--- END ISSUE ---
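Editorial illustration (not part of the original report): Python's `and` short-circuits, so the pattern `result = result and metric_reader.shutdown()` stops calling `shutdown()` on the remaining readers as soon as one of them returns `False`. A self-contained sketch of the effect:
```python
calls = []

def shutdown(name, ok):
    calls.append(name)
    return ok

result = True
for name, ok in [("reader_a", False), ("reader_b", True)]:
    # Mirrors `result = result and metric_reader.shutdown()`: once result is
    # False, the right-hand side is never evaluated.
    result = result and shutdown(name, ok)

print(calls)   # ['reader_a'] -- reader_b's shutdown() was never called
print(result)  # False
```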
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from atexit import register, unregister
16 from logging import getLogger
17 from threading import Lock
18 from typing import Optional, Sequence
19
20 from opentelemetry._metrics import Meter as APIMeter
21 from opentelemetry._metrics import MeterProvider as APIMeterProvider
22 from opentelemetry._metrics import NoOpMeter
23 from opentelemetry._metrics.instrument import Counter as APICounter
24 from opentelemetry._metrics.instrument import Histogram as APIHistogram
25 from opentelemetry._metrics.instrument import (
26 ObservableCounter as APIObservableCounter,
27 )
28 from opentelemetry._metrics.instrument import (
29 ObservableGauge as APIObservableGauge,
30 )
31 from opentelemetry._metrics.instrument import (
32 ObservableUpDownCounter as APIObservableUpDownCounter,
33 )
34 from opentelemetry._metrics.instrument import UpDownCounter as APIUpDownCounter
35 from opentelemetry.sdk._metrics.instrument import (
36 Counter,
37 Histogram,
38 ObservableCounter,
39 ObservableGauge,
40 ObservableUpDownCounter,
41 UpDownCounter,
42 )
43 from opentelemetry.sdk._metrics.measurement_consumer import (
44 MeasurementConsumer,
45 SynchronousMeasurementConsumer,
46 )
47 from opentelemetry.sdk._metrics.metric_reader import MetricReader
48 from opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration
49 from opentelemetry.sdk.resources import Resource
50 from opentelemetry.sdk.util.instrumentation import InstrumentationInfo
51
52 _logger = getLogger(__name__)
53
54
55 class Meter(APIMeter):
56 def __init__(
57 self,
58 instrumentation_info: InstrumentationInfo,
59 measurement_consumer: MeasurementConsumer,
60 ):
61 super().__init__(instrumentation_info)
62 self._instrumentation_info = instrumentation_info
63 self._measurement_consumer = measurement_consumer
64
65 def create_counter(self, name, unit=None, description=None) -> APICounter:
66 return Counter(
67 name,
68 self._instrumentation_info,
69 self._measurement_consumer,
70 unit,
71 description,
72 )
73
74 def create_up_down_counter(
75 self, name, unit=None, description=None
76 ) -> APIUpDownCounter:
77 return UpDownCounter(
78 name,
79 self._instrumentation_info,
80 self._measurement_consumer,
81 unit,
82 description,
83 )
84
85 def create_observable_counter(
86 self, name, callback, unit=None, description=None
87 ) -> APIObservableCounter:
88
89 instrument = ObservableCounter(
90 name,
91 self._instrumentation_info,
92 self._measurement_consumer,
93 callback,
94 unit,
95 description,
96 )
97
98 self._measurement_consumer.register_asynchronous_instrument(instrument)
99
100 return instrument
101
102 def create_histogram(
103 self, name, unit=None, description=None
104 ) -> APIHistogram:
105 return Histogram(
106 name,
107 self._instrumentation_info,
108 self._measurement_consumer,
109 unit,
110 description,
111 )
112
113 def create_observable_gauge(
114 self, name, callback, unit=None, description=None
115 ) -> APIObservableGauge:
116
117 instrument = ObservableGauge(
118 name,
119 self._instrumentation_info,
120 self._measurement_consumer,
121 callback,
122 unit,
123 description,
124 )
125
126 self._measurement_consumer.register_asynchronous_instrument(instrument)
127
128 return instrument
129
130 def create_observable_up_down_counter(
131 self, name, callback, unit=None, description=None
132 ) -> APIObservableUpDownCounter:
133
134 instrument = ObservableUpDownCounter(
135 name,
136 self._instrumentation_info,
137 self._measurement_consumer,
138 callback,
139 unit,
140 description,
141 )
142
143 self._measurement_consumer.register_asynchronous_instrument(instrument)
144
145 return instrument
146
147
148 class MeterProvider(APIMeterProvider):
149 """See `opentelemetry._metrics.MeterProvider`."""
150
151 def __init__(
152 self,
153 metric_readers: Sequence[MetricReader] = (),
154 resource: Resource = Resource.create({}),
155 shutdown_on_exit: bool = True,
156 ):
157 self._lock = Lock()
158 self._meter_lock = Lock()
159 self._atexit_handler = None
160 self._sdk_config = SdkConfiguration(
161 resource=resource, metric_readers=metric_readers
162 )
163 self._measurement_consumer = SynchronousMeasurementConsumer(
164 sdk_config=self._sdk_config
165 )
166
167 if shutdown_on_exit:
168 self._atexit_handler = register(self.shutdown)
169
170 self._meters = {}
171 self._metric_readers = metric_readers
172
173 for metric_reader in self._sdk_config.metric_readers:
174 metric_reader._register_measurement_consumer(self)
175
176 self._shutdown = False
177
178 def force_flush(self) -> bool:
179
180 # FIXME implement a timeout
181
182 metric_reader_result = True
183
184 for metric_reader in self._sdk_config.metric_readers:
185 metric_reader_result = (
186 metric_reader_result and metric_reader.force_flush()
187 )
188
189 if not metric_reader_result:
190 _logger.warning("Unable to force flush all metric readers")
191
192 return metric_reader_result
193
194 def shutdown(self):
195 # FIXME implement a timeout
196
197 if self._shutdown:
198 _logger.warning("shutdown can only be called once")
199 return False
200
201 result = True
202
203 for metric_reader in self._sdk_config.metric_readers:
204 result = result and metric_reader.shutdown()
205
206 if not result:
207 _logger.warning("A MetricReader failed to shutdown")
208
209 self._shutdown = True
210
211 if self._atexit_handler is not None:
212 unregister(self._atexit_handler)
213 self._atexit_handler = None
214
215 return result
216
217 def get_meter(
218 self,
219 name: str,
220 version: Optional[str] = None,
221 schema_url: Optional[str] = None,
222 ) -> Meter:
223
224 if self._shutdown:
225 _logger.warning(
226 "A shutdown `MeterProvider` can not provide a `Meter`"
227 )
228 return NoOpMeter(name, version=version, schema_url=schema_url)
229
230 info = InstrumentationInfo(name, version, schema_url)
231 with self._meter_lock:
232 if not self._meters.get(info):
233 self._meters[info] = Meter(
234 info,
235 self._measurement_consumer,
236 )
237 return self._meters[info]
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py
@@ -198,13 +198,17 @@
_logger.warning("shutdown can only be called once")
return False
- result = True
+ overall_result = True
for metric_reader in self._sdk_config.metric_readers:
- result = result and metric_reader.shutdown()
+ metric_reader_result = metric_reader.shutdown()
- if not result:
- _logger.warning("A MetricReader failed to shutdown")
+ if not metric_reader_result:
+ _logger.warning(
+ "MetricReader {metric_reader} failed to shutdown"
+ )
+
+ overall_result = overall_result and metric_reader_result
self._shutdown = True
@@ -212,7 +216,7 @@
unregister(self._atexit_handler)
self._atexit_handler = None
- return result
+ return overall_result
def get_meter(
self,
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py\n@@ -198,13 +198,17 @@\n _logger.warning(\"shutdown can only be called once\")\n return False\n \n- result = True\n+ overall_result = True\n \n for metric_reader in self._sdk_config.metric_readers:\n- result = result and metric_reader.shutdown()\n+ metric_reader_result = metric_reader.shutdown()\n \n- if not result:\n- _logger.warning(\"A MetricReader failed to shutdown\")\n+ if not metric_reader_result:\n+ _logger.warning(\n+ \"MetricReader {metric_reader} failed to shutdown\"\n+ )\n+\n+ overall_result = overall_result and metric_reader_result\n \n self._shutdown = True\n \n@@ -212,7 +216,7 @@\n unregister(self._atexit_handler)\n self._atexit_handler = None\n \n- return result\n+ return overall_result\n \n def get_meter(\n self,\n", "issue": "Metric Reader function not being called\nAs reported by @aabmass :v:\r\n\r\nThere is a bug [here](https://github.com/open-telemetry/opentelemetry-python/blob/main/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py#L204). If a metric reader returns `False` in a shutdown, the next metric reader shutdown function won't be called.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom atexit import register, unregister\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import Optional, Sequence\n\nfrom opentelemetry._metrics import Meter as APIMeter\nfrom opentelemetry._metrics import MeterProvider as APIMeterProvider\nfrom opentelemetry._metrics import NoOpMeter\nfrom opentelemetry._metrics.instrument import Counter as APICounter\nfrom opentelemetry._metrics.instrument import Histogram as APIHistogram\nfrom opentelemetry._metrics.instrument import (\n ObservableCounter as APIObservableCounter,\n)\nfrom opentelemetry._metrics.instrument import (\n ObservableGauge as APIObservableGauge,\n)\nfrom opentelemetry._metrics.instrument import (\n ObservableUpDownCounter as APIObservableUpDownCounter,\n)\nfrom opentelemetry._metrics.instrument import UpDownCounter as APIUpDownCounter\nfrom opentelemetry.sdk._metrics.instrument import (\n Counter,\n Histogram,\n ObservableCounter,\n ObservableGauge,\n ObservableUpDownCounter,\n UpDownCounter,\n)\nfrom opentelemetry.sdk._metrics.measurement_consumer import (\n MeasurementConsumer,\n SynchronousMeasurementConsumer,\n)\nfrom opentelemetry.sdk._metrics.metric_reader import MetricReader\nfrom opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration\nfrom opentelemetry.sdk.resources import Resource\nfrom opentelemetry.sdk.util.instrumentation import InstrumentationInfo\n\n_logger = getLogger(__name__)\n\n\nclass Meter(APIMeter):\n def __init__(\n self,\n instrumentation_info: InstrumentationInfo,\n measurement_consumer: 
MeasurementConsumer,\n ):\n super().__init__(instrumentation_info)\n self._instrumentation_info = instrumentation_info\n self._measurement_consumer = measurement_consumer\n\n def create_counter(self, name, unit=None, description=None) -> APICounter:\n return Counter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_up_down_counter(\n self, name, unit=None, description=None\n ) -> APIUpDownCounter:\n return UpDownCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_observable_counter(\n self, name, callback, unit=None, description=None\n ) -> APIObservableCounter:\n\n instrument = ObservableCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n def create_histogram(\n self, name, unit=None, description=None\n ) -> APIHistogram:\n return Histogram(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_observable_gauge(\n self, name, callback, unit=None, description=None\n ) -> APIObservableGauge:\n\n instrument = ObservableGauge(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n def create_observable_up_down_counter(\n self, name, callback, unit=None, description=None\n ) -> APIObservableUpDownCounter:\n\n instrument = ObservableUpDownCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n\nclass MeterProvider(APIMeterProvider):\n \"\"\"See `opentelemetry._metrics.MeterProvider`.\"\"\"\n\n def __init__(\n self,\n metric_readers: Sequence[MetricReader] = (),\n resource: Resource = Resource.create({}),\n shutdown_on_exit: bool = True,\n ):\n self._lock = Lock()\n self._meter_lock = Lock()\n self._atexit_handler = None\n self._sdk_config = SdkConfiguration(\n resource=resource, metric_readers=metric_readers\n )\n self._measurement_consumer = SynchronousMeasurementConsumer(\n sdk_config=self._sdk_config\n )\n\n if shutdown_on_exit:\n self._atexit_handler = register(self.shutdown)\n\n self._meters = {}\n self._metric_readers = metric_readers\n\n for metric_reader in self._sdk_config.metric_readers:\n metric_reader._register_measurement_consumer(self)\n\n self._shutdown = False\n\n def force_flush(self) -> bool:\n\n # FIXME implement a timeout\n\n metric_reader_result = True\n\n for metric_reader in self._sdk_config.metric_readers:\n metric_reader_result = (\n metric_reader_result and metric_reader.force_flush()\n )\n\n if not metric_reader_result:\n _logger.warning(\"Unable to force flush all metric readers\")\n\n return metric_reader_result\n\n def shutdown(self):\n # FIXME implement a timeout\n\n if self._shutdown:\n _logger.warning(\"shutdown can only be called once\")\n return False\n\n result = True\n\n for metric_reader in self._sdk_config.metric_readers:\n result = result and metric_reader.shutdown()\n\n if not result:\n _logger.warning(\"A MetricReader failed to shutdown\")\n\n self._shutdown = True\n\n if self._atexit_handler is not None:\n unregister(self._atexit_handler)\n self._atexit_handler = None\n\n return result\n\n def 
get_meter(\n self,\n name: str,\n version: Optional[str] = None,\n schema_url: Optional[str] = None,\n ) -> Meter:\n\n if self._shutdown:\n _logger.warning(\n \"A shutdown `MeterProvider` can not provide a `Meter`\"\n )\n return NoOpMeter(name, version=version, schema_url=schema_url)\n\n info = InstrumentationInfo(name, version, schema_url)\n with self._meter_lock:\n if not self._meters.get(info):\n self._meters[info] = Meter(\n info,\n self._measurement_consumer,\n )\n return self._meters[info]\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom atexit import register, unregister\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import Optional, Sequence\n\nfrom opentelemetry._metrics import Meter as APIMeter\nfrom opentelemetry._metrics import MeterProvider as APIMeterProvider\nfrom opentelemetry._metrics import NoOpMeter\nfrom opentelemetry._metrics.instrument import Counter as APICounter\nfrom opentelemetry._metrics.instrument import Histogram as APIHistogram\nfrom opentelemetry._metrics.instrument import (\n ObservableCounter as APIObservableCounter,\n)\nfrom opentelemetry._metrics.instrument import (\n ObservableGauge as APIObservableGauge,\n)\nfrom opentelemetry._metrics.instrument import (\n ObservableUpDownCounter as APIObservableUpDownCounter,\n)\nfrom opentelemetry._metrics.instrument import UpDownCounter as APIUpDownCounter\nfrom opentelemetry.sdk._metrics.instrument import (\n Counter,\n Histogram,\n ObservableCounter,\n ObservableGauge,\n ObservableUpDownCounter,\n UpDownCounter,\n)\nfrom opentelemetry.sdk._metrics.measurement_consumer import (\n MeasurementConsumer,\n SynchronousMeasurementConsumer,\n)\nfrom opentelemetry.sdk._metrics.metric_reader import MetricReader\nfrom opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration\nfrom opentelemetry.sdk.resources import Resource\nfrom opentelemetry.sdk.util.instrumentation import InstrumentationInfo\n\n_logger = getLogger(__name__)\n\n\nclass Meter(APIMeter):\n def __init__(\n self,\n instrumentation_info: InstrumentationInfo,\n measurement_consumer: MeasurementConsumer,\n ):\n super().__init__(instrumentation_info)\n self._instrumentation_info = instrumentation_info\n self._measurement_consumer = measurement_consumer\n\n def create_counter(self, name, unit=None, description=None) -> APICounter:\n return Counter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_up_down_counter(\n self, name, unit=None, description=None\n ) -> APIUpDownCounter:\n return UpDownCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_observable_counter(\n self, name, callback, unit=None, description=None\n ) -> APIObservableCounter:\n\n instrument = ObservableCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n 
callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n def create_histogram(\n self, name, unit=None, description=None\n ) -> APIHistogram:\n return Histogram(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_observable_gauge(\n self, name, callback, unit=None, description=None\n ) -> APIObservableGauge:\n\n instrument = ObservableGauge(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n def create_observable_up_down_counter(\n self, name, callback, unit=None, description=None\n ) -> APIObservableUpDownCounter:\n\n instrument = ObservableUpDownCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n\nclass MeterProvider(APIMeterProvider):\n \"\"\"See `opentelemetry._metrics.MeterProvider`.\"\"\"\n\n def __init__(\n self,\n metric_readers: Sequence[MetricReader] = (),\n resource: Resource = Resource.create({}),\n shutdown_on_exit: bool = True,\n ):\n self._lock = Lock()\n self._meter_lock = Lock()\n self._atexit_handler = None\n self._sdk_config = SdkConfiguration(\n resource=resource, metric_readers=metric_readers\n )\n self._measurement_consumer = SynchronousMeasurementConsumer(\n sdk_config=self._sdk_config\n )\n\n if shutdown_on_exit:\n self._atexit_handler = register(self.shutdown)\n\n self._meters = {}\n self._metric_readers = metric_readers\n\n for metric_reader in self._sdk_config.metric_readers:\n metric_reader._register_measurement_consumer(self)\n\n self._shutdown = False\n\n def force_flush(self) -> bool:\n\n # FIXME implement a timeout\n\n metric_reader_result = True\n\n for metric_reader in self._sdk_config.metric_readers:\n metric_reader_result = (\n metric_reader_result and metric_reader.force_flush()\n )\n\n if not metric_reader_result:\n _logger.warning(\"Unable to force flush all metric readers\")\n\n return metric_reader_result\n\n def shutdown(self):\n # FIXME implement a timeout\n\n if self._shutdown:\n _logger.warning(\"shutdown can only be called once\")\n return False\n\n overall_result = True\n\n for metric_reader in self._sdk_config.metric_readers:\n metric_reader_result = metric_reader.shutdown()\n\n if not metric_reader_result:\n _logger.warning(\n \"MetricReader {metric_reader} failed to shutdown\"\n )\n\n overall_result = overall_result and metric_reader_result\n\n self._shutdown = True\n\n if self._atexit_handler is not None:\n unregister(self._atexit_handler)\n self._atexit_handler = None\n\n return overall_result\n\n def get_meter(\n self,\n name: str,\n version: Optional[str] = None,\n schema_url: Optional[str] = None,\n ) -> Meter:\n\n if self._shutdown:\n _logger.warning(\n \"A shutdown `MeterProvider` can not provide a `Meter`\"\n )\n return NoOpMeter(name, version=version, schema_url=schema_url)\n\n info = InstrumentationInfo(name, version, schema_url)\n with self._meter_lock:\n if not self._meters.get(info):\n self._meters[info] = Meter(\n info,\n self._measurement_consumer,\n )\n return self._meters[info]\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py"}]} | 2,393 | 270 |
gh_patches_debug_30084 | rasdani/github-patches | git_diff | cobbler__cobbler-3264 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Task logs don't end
### Describe the bug
Task logs contain the normal server logs, even though the task has stopped. This leads to duplicated logs in all created files.
### Steps to reproduce
1. `cobbler mkloaders` (or any other task)
2. Repeat step 1
3. See logs being appended to completed tasks
### Expected behavior
The files should only contain the logs for the task.
### Cobbler version
Commit: 2a5c7ce9c1533bbf6f6b9050198d98aca45a06ae
````paste below
Cobbler 3.4.0
source: ?, ?
build time: Fri Oct 14 14:17:09 2022
````
### Operating system
```
e62e937a2de9:/var/log/cobbler/tasks # cat /etc/os-release
NAME="openSUSE Leap"
VERSION="15.3"
ID="opensuse-leap"
ID_LIKE="suse opensuse"
VERSION_ID="15.3"
PRETTY_NAME="openSUSE Leap 15.3"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:opensuse:leap:15.3"
BUG_REPORT_URL="https://bugs.opensuse.org"
HOME_URL="https://www.opensuse.org/"
```
### Cobbler log
Not relevant
### Screenshots
Not relevant
### Additional information
Found by accident during another debugging session
--- END ISSUE ---
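Editorial illustration (not part of the original report): the duplication comes from attaching a per-task `FileHandler` to the root logger and never detaching it, so every later record keeps landing in the old task files as well. A minimal sketch with hypothetical paths:
```python
import logging

root = logging.getLogger()

for event_id in ("task-1", "task-2"):
    handler = logging.FileHandler(f"/tmp/{event_id}.log")  # hypothetical paths
    root.addHandler(handler)
    root.warning("running %s", event_id)
    # Without a matching root.removeHandler(handler) when the task finishes,
    # task-1.log also receives the "running task-2" record and everything after it.
```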
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cobbler/utils/thread.py`
Content:
```
1 """
2 This module is responsible for managing the custom common threading logic Cobbler has.
3 """
4
5 import logging
6 import pathlib
7 from threading import Thread
8 from typing import Callable
9
10 from cobbler import enums
11 from cobbler import utils
12
13
14 class CobblerThread(Thread):
15 """
16 This is a custom thread that has a custom logger as well as logic to execute Cobbler triggers.
17 """
18
19 def __init__(
20 self,
21 event_id: str,
22 remote,
23 options: dict,
24 task_name: str,
25 api,
26 run: Callable,
27 on_done: Callable = None,
28 ):
29 """
30 This constructor creates a Cobbler thread which then may be run by calling ``run()``.
31
32 :param event_id: The event-id which is associated with this thread. Also used as thread name
33 :param remote: The Cobbler remote object to execute actions with.
34 :param options: Additional options which can be passed into the Thread.
35 :param task_name: The high level task name which is used to trigger pre- and post-task triggers
36 :param api: The Cobbler api object to resolve information with.
37 :param run: The callable that is going to be executed with this thread.
38 :param on_done: An optional callable that is going to be executed after ``run`` but before the triggers.
39 """
40 super().__init__(name=event_id)
41 self.event_id = event_id
42 self.remote = remote
43 self.logger = logging.getLogger()
44 self.__setup_logger()
45 self._run = run
46 self.on_done = on_done
47 if options is None:
48 options = {}
49 self.options = options
50 self.task_name = task_name
51 self.api = api
52
53 def __setup_logger(self):
54 """
55 Utility function that will set up the Python logger for the tasks in a special directory.
56 """
57 filename = pathlib.Path("/var/log/cobbler/tasks") / f"{self.event_id}.log"
58 task_log_handler = logging.FileHandler(str(filename), encoding="utf-8")
59 task_log_formatter = logging.Formatter(
60 "[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s"
61 )
62 task_log_handler.setFormatter(task_log_formatter)
63 self.logger.setLevel(logging.INFO)
64 self.logger.addHandler(task_log_handler)
65
66 def _set_task_state(self, new_state: enums.EventStatus):
67 """
68 Set the state of the task. (For internal use only)
69
70 :param new_state: The new state of the task.
71 """
72 if not isinstance(new_state, enums.EventStatus):
73 raise TypeError('"new_state" needs to be of type enums.EventStatus!')
74 if self.event_id not in self.remote.events:
75 raise ValueError('"event_id" not existing!')
76 self.remote.events[self.event_id].state = new_state
77 # clear the list of who has read it
78 self.remote.events[self.event_id].read_by_who = []
79 if new_state == enums.EventStatus.COMPLETE:
80 self.logger.info("### TASK COMPLETE ###")
81 elif new_state == enums.EventStatus.FAILED:
82 self.logger.error("### TASK FAILED ###")
83
84 def run(self):
85 """
86 Run the thread.
87
88 :return: The return code of the action. This may a boolean or a Linux return code.
89 """
90 self.logger.info("start_task(%s); event_id(%s)", self.task_name, self.event_id)
91 try:
92 if utils.run_triggers(
93 api=self.api,
94 globber=f"/var/lib/cobbler/triggers/task/{self.task_name}/pre/*",
95 additional=self.options,
96 ):
97 self._set_task_state(enums.EventStatus.FAILED)
98 return False
99 rc = self._run(self)
100 if rc is not None and not rc:
101 self._set_task_state(enums.EventStatus.FAILED)
102 else:
103 self._set_task_state(enums.EventStatus.COMPLETE)
104 if self.on_done is not None:
105 self.on_done()
106 utils.run_triggers(
107 api=self.api,
108 globber=f"/var/lib/cobbler/triggers/task/{self.task_name}/post/*",
109 additional=self.options,
110 )
111 return rc
112 except Exception:
113 utils.log_exc()
114 self._set_task_state(enums.EventStatus.FAILED)
115 return False
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cobbler/utils/thread.py b/cobbler/utils/thread.py
--- a/cobbler/utils/thread.py
+++ b/cobbler/utils/thread.py
@@ -41,6 +41,7 @@
self.event_id = event_id
self.remote = remote
self.logger = logging.getLogger()
+ self.__task_log_handler = None
self.__setup_logger()
self._run = run
self.on_done = on_done
@@ -55,13 +56,13 @@
Utility function that will set up the Python logger for the tasks in a special directory.
"""
filename = pathlib.Path("/var/log/cobbler/tasks") / f"{self.event_id}.log"
- task_log_handler = logging.FileHandler(str(filename), encoding="utf-8")
+ self.__task_log_handler = logging.FileHandler(str(filename), encoding="utf-8")
task_log_formatter = logging.Formatter(
"[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s"
)
- task_log_handler.setFormatter(task_log_formatter)
+ self.__task_log_handler.setFormatter(task_log_formatter)
self.logger.setLevel(logging.INFO)
- self.logger.addHandler(task_log_handler)
+ self.logger.addHandler(self.__task_log_handler)
def _set_task_state(self, new_state: enums.EventStatus):
"""
@@ -113,3 +114,5 @@
utils.log_exc()
self._set_task_state(enums.EventStatus.FAILED)
return False
+ finally:
+ self.logger.removeHandler(self.__task_log_handler)
| {"golden_diff": "diff --git a/cobbler/utils/thread.py b/cobbler/utils/thread.py\n--- a/cobbler/utils/thread.py\n+++ b/cobbler/utils/thread.py\n@@ -41,6 +41,7 @@\n self.event_id = event_id\n self.remote = remote\n self.logger = logging.getLogger()\n+ self.__task_log_handler = None\n self.__setup_logger()\n self._run = run\n self.on_done = on_done\n@@ -55,13 +56,13 @@\n Utility function that will set up the Python logger for the tasks in a special directory.\n \"\"\"\n filename = pathlib.Path(\"/var/log/cobbler/tasks\") / f\"{self.event_id}.log\"\n- task_log_handler = logging.FileHandler(str(filename), encoding=\"utf-8\")\n+ self.__task_log_handler = logging.FileHandler(str(filename), encoding=\"utf-8\")\n task_log_formatter = logging.Formatter(\n \"[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s\"\n )\n- task_log_handler.setFormatter(task_log_formatter)\n+ self.__task_log_handler.setFormatter(task_log_formatter)\n self.logger.setLevel(logging.INFO)\n- self.logger.addHandler(task_log_handler)\n+ self.logger.addHandler(self.__task_log_handler)\n \n def _set_task_state(self, new_state: enums.EventStatus):\n \"\"\"\n@@ -113,3 +114,5 @@\n utils.log_exc()\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n+ finally:\n+ self.logger.removeHandler(self.__task_log_handler)\n", "issue": "Task logs don't end\n### Describe the bug\r\n\r\nTask logs contain the normal server logs, even though the task has stopped. This leads to duplicated logs in all created files.\r\n\r\n### Steps to reproduce\r\n\r\n1. `cobbler mkloaders` (or any other task)\r\n2. Repeat step 1\r\n3. See logs being appended to completed tasks\r\n\r\n### Expected behavior\r\n\r\nThe files should only contain the logs for the task.\r\n\r\n### Cobbler version\r\n\r\nCommit: 2a5c7ce9c1533bbf6f6b9050198d98aca45a06ae\r\n\r\n````paste below\r\nCobbler 3.4.0\r\n source: ?, ?\r\n build time: Fri Oct 14 14:17:09 2022\r\n````\r\n\r\n### Operating system\r\n\r\n```\r\ne62e937a2de9:/var/log/cobbler/tasks # cat /etc/os-release \r\nNAME=\"openSUSE Leap\"\r\nVERSION=\"15.3\"\r\nID=\"opensuse-leap\"\r\nID_LIKE=\"suse opensuse\"\r\nVERSION_ID=\"15.3\"\r\nPRETTY_NAME=\"openSUSE Leap 15.3\"\r\nANSI_COLOR=\"0;32\"\r\nCPE_NAME=\"cpe:/o:opensuse:leap:15.3\"\r\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\r\nHOME_URL=\"https://www.opensuse.org/\"\r\n```\r\n\r\n### Cobbler log\r\n\r\nNot relevant\r\n\r\n### Screenshots\r\n\r\nNot relevant\r\n\r\n### Additional information\r\n\r\nFound by accident during another debugging session\r\n\n", "before_files": [{"content": "\"\"\"\nThis module is responsible for managing the custom common threading logic Cobbler has.\n\"\"\"\n\nimport logging\nimport pathlib\nfrom threading import Thread\nfrom typing import Callable\n\nfrom cobbler import enums\nfrom cobbler import utils\n\n\nclass CobblerThread(Thread):\n \"\"\"\n This is a custom thread that has a custom logger as well as logic to execute Cobbler triggers.\n \"\"\"\n\n def __init__(\n self,\n event_id: str,\n remote,\n options: dict,\n task_name: str,\n api,\n run: Callable,\n on_done: Callable = None,\n ):\n \"\"\"\n This constructor creates a Cobbler thread which then may be run by calling ``run()``.\n\n :param event_id: The event-id which is associated with this thread. 
Also used as thread name\n :param remote: The Cobbler remote object to execute actions with.\n :param options: Additional options which can be passed into the Thread.\n :param task_name: The high level task name which is used to trigger pre- and post-task triggers\n :param api: The Cobbler api object to resolve information with.\n :param run: The callable that is going to be executed with this thread.\n :param on_done: An optional callable that is going to be executed after ``run`` but before the triggers.\n \"\"\"\n super().__init__(name=event_id)\n self.event_id = event_id\n self.remote = remote\n self.logger = logging.getLogger()\n self.__setup_logger()\n self._run = run\n self.on_done = on_done\n if options is None:\n options = {}\n self.options = options\n self.task_name = task_name\n self.api = api\n\n def __setup_logger(self):\n \"\"\"\n Utility function that will set up the Python logger for the tasks in a special directory.\n \"\"\"\n filename = pathlib.Path(\"/var/log/cobbler/tasks\") / f\"{self.event_id}.log\"\n task_log_handler = logging.FileHandler(str(filename), encoding=\"utf-8\")\n task_log_formatter = logging.Formatter(\n \"[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s\"\n )\n task_log_handler.setFormatter(task_log_formatter)\n self.logger.setLevel(logging.INFO)\n self.logger.addHandler(task_log_handler)\n\n def _set_task_state(self, new_state: enums.EventStatus):\n \"\"\"\n Set the state of the task. (For internal use only)\n\n :param new_state: The new state of the task.\n \"\"\"\n if not isinstance(new_state, enums.EventStatus):\n raise TypeError('\"new_state\" needs to be of type enums.EventStatus!')\n if self.event_id not in self.remote.events:\n raise ValueError('\"event_id\" not existing!')\n self.remote.events[self.event_id].state = new_state\n # clear the list of who has read it\n self.remote.events[self.event_id].read_by_who = []\n if new_state == enums.EventStatus.COMPLETE:\n self.logger.info(\"### TASK COMPLETE ###\")\n elif new_state == enums.EventStatus.FAILED:\n self.logger.error(\"### TASK FAILED ###\")\n\n def run(self):\n \"\"\"\n Run the thread.\n\n :return: The return code of the action. 
This may a boolean or a Linux return code.\n \"\"\"\n self.logger.info(\"start_task(%s); event_id(%s)\", self.task_name, self.event_id)\n try:\n if utils.run_triggers(\n api=self.api,\n globber=f\"/var/lib/cobbler/triggers/task/{self.task_name}/pre/*\",\n additional=self.options,\n ):\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n rc = self._run(self)\n if rc is not None and not rc:\n self._set_task_state(enums.EventStatus.FAILED)\n else:\n self._set_task_state(enums.EventStatus.COMPLETE)\n if self.on_done is not None:\n self.on_done()\n utils.run_triggers(\n api=self.api,\n globber=f\"/var/lib/cobbler/triggers/task/{self.task_name}/post/*\",\n additional=self.options,\n )\n return rc\n except Exception:\n utils.log_exc()\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n", "path": "cobbler/utils/thread.py"}], "after_files": [{"content": "\"\"\"\nThis module is responsible for managing the custom common threading logic Cobbler has.\n\"\"\"\n\nimport logging\nimport pathlib\nfrom threading import Thread\nfrom typing import Callable\n\nfrom cobbler import enums\nfrom cobbler import utils\n\n\nclass CobblerThread(Thread):\n \"\"\"\n This is a custom thread that has a custom logger as well as logic to execute Cobbler triggers.\n \"\"\"\n\n def __init__(\n self,\n event_id: str,\n remote,\n options: dict,\n task_name: str,\n api,\n run: Callable,\n on_done: Callable = None,\n ):\n \"\"\"\n This constructor creates a Cobbler thread which then may be run by calling ``run()``.\n\n :param event_id: The event-id which is associated with this thread. Also used as thread name\n :param remote: The Cobbler remote object to execute actions with.\n :param options: Additional options which can be passed into the Thread.\n :param task_name: The high level task name which is used to trigger pre- and post-task triggers\n :param api: The Cobbler api object to resolve information with.\n :param run: The callable that is going to be executed with this thread.\n :param on_done: An optional callable that is going to be executed after ``run`` but before the triggers.\n \"\"\"\n super().__init__(name=event_id)\n self.event_id = event_id\n self.remote = remote\n self.logger = logging.getLogger()\n self.__task_log_handler = None\n self.__setup_logger()\n self._run = run\n self.on_done = on_done\n if options is None:\n options = {}\n self.options = options\n self.task_name = task_name\n self.api = api\n\n def __setup_logger(self):\n \"\"\"\n Utility function that will set up the Python logger for the tasks in a special directory.\n \"\"\"\n filename = pathlib.Path(\"/var/log/cobbler/tasks\") / f\"{self.event_id}.log\"\n self.__task_log_handler = logging.FileHandler(str(filename), encoding=\"utf-8\")\n task_log_formatter = logging.Formatter(\n \"[%(threadName)s] %(asctime)s - %(levelname)s | %(message)s\"\n )\n self.__task_log_handler.setFormatter(task_log_formatter)\n self.logger.setLevel(logging.INFO)\n self.logger.addHandler(self.__task_log_handler)\n\n def _set_task_state(self, new_state: enums.EventStatus):\n \"\"\"\n Set the state of the task. 
(For internal use only)\n\n :param new_state: The new state of the task.\n \"\"\"\n if not isinstance(new_state, enums.EventStatus):\n raise TypeError('\"new_state\" needs to be of type enums.EventStatus!')\n if self.event_id not in self.remote.events:\n raise ValueError('\"event_id\" not existing!')\n self.remote.events[self.event_id].state = new_state\n # clear the list of who has read it\n self.remote.events[self.event_id].read_by_who = []\n if new_state == enums.EventStatus.COMPLETE:\n self.logger.info(\"### TASK COMPLETE ###\")\n elif new_state == enums.EventStatus.FAILED:\n self.logger.error(\"### TASK FAILED ###\")\n\n def run(self):\n \"\"\"\n Run the thread.\n\n :return: The return code of the action. This may a boolean or a Linux return code.\n \"\"\"\n self.logger.info(\"start_task(%s); event_id(%s)\", self.task_name, self.event_id)\n try:\n if utils.run_triggers(\n api=self.api,\n globber=f\"/var/lib/cobbler/triggers/task/{self.task_name}/pre/*\",\n additional=self.options,\n ):\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n rc = self._run(self)\n if rc is not None and not rc:\n self._set_task_state(enums.EventStatus.FAILED)\n else:\n self._set_task_state(enums.EventStatus.COMPLETE)\n if self.on_done is not None:\n self.on_done()\n utils.run_triggers(\n api=self.api,\n globber=f\"/var/lib/cobbler/triggers/task/{self.task_name}/post/*\",\n additional=self.options,\n )\n return rc\n except Exception:\n utils.log_exc()\n self._set_task_state(enums.EventStatus.FAILED)\n return False\n finally:\n self.logger.removeHandler(self.__task_log_handler)\n", "path": "cobbler/utils/thread.py"}]} | 1,763 | 345 |
gh_patches_debug_8356 | rasdani/github-patches | git_diff | interlegis__sapl-437 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sapl 31: define the display order of a parliamentarian's terms of office
In the Parlamentar feature, define the order in which a parliamentarian's terms of office (mandatos) are presented. I think descending order is the appropriate one.
Example: http://sapl31agudo.interlegis.leg.br/parlamentar/21/mandato
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlamentares/views.py`
Content:
```
1 from django.contrib import messages
2 from django.core.urlresolvers import reverse_lazy
3 from django.shortcuts import redirect
4 from django.utils.datastructures import MultiValueDictKeyError
5 from django.utils.translation import ugettext_lazy as _
6 from django.views.generic import FormView
7
8 import crud.base
9 import crud.masterdetail
10 from crud.base import Crud
11 from crud.masterdetail import MasterDetailCrud
12
13 from .forms import FiliacaoForm, ParlamentarCreateForm, ParlamentarForm
14 from .models import (CargoMesa, Coligacao, ComposicaoMesa, Dependente,
15 Filiacao, Legislatura, Mandato, NivelInstrucao,
16 Parlamentar, Partido, SessaoLegislativa, SituacaoMilitar,
17 TipoAfastamento, TipoDependente)
18
19 CargoMesaCrud = Crud.build(CargoMesa, 'cargo_mesa')
20 LegislaturaCrud = Crud.build(Legislatura, 'tabelas_auxiliares#legislatura')
21 ColigacaoCrud = Crud.build(Coligacao, 'coligacao')
22 PartidoCrud = Crud.build(Partido, 'partidos')
23 SessaoLegislativaCrud = Crud.build(SessaoLegislativa, 'sessao_legislativa')
24 TipoDependenteCrud = Crud.build(TipoDependente, 'tipo_dependente')
25 NivelInstrucaoCrud = Crud.build(NivelInstrucao, 'nivel_instrucao')
26 TipoAfastamentoCrud = Crud.build(TipoAfastamento, 'tipo_afastamento')
27 TipoMilitarCrud = Crud.build(SituacaoMilitar, 'tipo_situa_militar')
28
29 DependenteCrud = MasterDetailCrud.build(Dependente, 'parlamentar', '')
30 MandatoCrud = MasterDetailCrud.build(Mandato, 'parlamentar', '')
31
32
33 class FiliacaoCrud(MasterDetailCrud):
34 model = Filiacao
35 parent_field = 'parlamentar'
36 help_path = ''
37
38 class CreateView(MasterDetailCrud.CreateView):
39 form_class = FiliacaoForm
40
41 class UpdateView(MasterDetailCrud.UpdateView):
42 form_class = FiliacaoForm
43
44 class ListView(MasterDetailCrud.ListView):
45 ordering = '-data'
46
47
48 class ParlamentarCrud(Crud):
49 model = Parlamentar
50 help_path = ''
51
52 class UpdateView(crud.base.CrudUpdateView):
53 form_class = ParlamentarForm
54
55 class CreateView(crud.base.CrudCreateView):
56 form_class = ParlamentarCreateForm
57
58 @property
59 def layout_key(self):
60 return 'ParlamentarCreate'
61
62 class ListView(crud.base.CrudListView):
63 template_name = "parlamentares/parlamentares_list.html"
64 paginate_by = None
65
66 def take_legislatura_id(self):
67 legislaturas = Legislatura.objects.all().order_by(
68 '-data_inicio', '-data_fim')
69
70 try:
71 legislatura_id = int(self.request.GET['periodo'])
72 except MultiValueDictKeyError:
73 legislatura_id = legislaturas.first().id
74
75 return legislatura_id
76
77 def get_queryset(self):
78 mandatos = Mandato.objects.filter(
79 legislatura_id=self.take_legislatura_id())
80 return mandatos
81
82 def get_rows(self, object_list):
83 parlamentares = []
84 for m in object_list:
85 ultima_filiacao = m.parlamentar.filiacao_set.\
86 order_by('-data').first()
87 if ultima_filiacao and not ultima_filiacao.data_desfiliacao:
88 partido = ultima_filiacao.partido.sigla
89 else:
90 partido = _('Sem Partido')
91
92 parlamentar = [
93 (m.parlamentar.nome_parlamentar, m.parlamentar.id),
94 (partido, None),
95 ('Sim' if m.parlamentar.ativo else 'Não', None)
96 ]
97 parlamentares.append(parlamentar)
98 return parlamentares
99
100 def get_headers(self):
101 return ['Parlamentar', 'Partido', 'Ativo?']
102
103 def get_context_data(self, **kwargs):
104 context = super(ParlamentarCrud.ListView, self
105 ).get_context_data(**kwargs)
106 context.setdefault('title', self.verbose_name_plural)
107
108 # Adiciona legislatura para filtrar parlamentares
109 legislaturas = Legislatura.objects.all().order_by(
110 '-data_inicio', '-data_fim')
111 context['legislaturas'] = legislaturas
112 context['legislatura_id'] = self.take_legislatura_id()
113 return context
114
115
116 class MesaDiretoraView(FormView):
117 template_name = "mesa_diretora/mesa_diretora.html"
118 success_url = reverse_lazy('parlamentares:mesa_diretora')
119
120 # Essa função avisa quando se pode compor uma Mesa Legislativa)
121 def validation(self, request):
122 mensagem = _("Não há nenhuma Sessão Legislativa cadastrada. \
123 Só é possível compor uma Mesa Diretora quando há uma Sessão \
124 Legislativa cadastrada.")
125 messages.add_message(request, messages.INFO, mensagem)
126
127 return self.render_to_response(
128 {'legislaturas': Legislatura.objects.all(
129 ).order_by('-data_inicio'),
130 'legislatura_selecionada': Legislatura.objects.last(),
131 'cargos_vagos': CargoMesa.objects.all()})
132
133 def get(self, request, *args, **kwargs):
134
135 if (not Legislatura.objects.all() or
136 not SessaoLegislativa.objects.all()):
137 return self.validation(request)
138
139 mesa = SessaoLegislativa.objects.filter(
140 legislatura=Legislatura.objects.last()).first(
141 ).composicaomesa_set.all()
142
143 cargos_ocupados = [m.cargo for m in mesa]
144 cargos = CargoMesa.objects.all()
145 cargos_vagos = list(set(cargos) - set(cargos_ocupados))
146
147 parlamentares = Legislatura.objects.last().mandato_set.all()
148 parlamentares_ocupados = [m.parlamentar for m in mesa]
149 parlamentares_vagos = list(
150 set(
151 [p.parlamentar for p in parlamentares]) - set(
152 parlamentares_ocupados))
153
154 return self.render_to_response(
155 {'legislaturas': Legislatura.objects.all(
156 ).order_by('-data_inicio'),
157 'legislatura_selecionada': Legislatura.objects.last(),
158 'sessoes': SessaoLegislativa.objects.filter(
159 legislatura=Legislatura.objects.last()),
160 'sessao_selecionada': SessaoLegislativa.objects.filter(
161 legislatura=Legislatura.objects.last()).first(),
162 'composicao_mesa': mesa,
163 'parlamentares': parlamentares_vagos,
164 'cargos_vagos': cargos_vagos
165 })
166
167 def post(self, request, *args, **kwargs):
168 if 'Incluir' in request.POST:
169
170 if (not Legislatura.objects.all() or
171 not SessaoLegislativa.objects.all()):
172 return self.validation(request)
173
174 composicao = ComposicaoMesa()
175 composicao.sessao_legislativa = SessaoLegislativa.objects.get(
176 id=int(request.POST['sessao']))
177 composicao.parlamentar = Parlamentar.objects.get(
178 id=int(request.POST['parlamentar']))
179 composicao.cargo = CargoMesa.objects.get(
180 id=int(request.POST['cargo']))
181 composicao.save()
182
183 return redirect('parlamentares:mesa_diretora')
184
185 elif 'Excluir' in request.POST:
186
187 if (not Legislatura.objects.all() or
188 not SessaoLegislativa.objects.all()):
189 return self.validation(request)
190
191 if 'composicao_mesa' in request.POST:
192 ids = request.POST['composicao_mesa'].split(':')
193 composicao = ComposicaoMesa.objects.get(
194 sessao_legislativa_id=int(request.POST['sessao']),
195 parlamentar_id=int(ids[0]),
196 cargo_id=int(ids[1])
197 )
198 composicao.delete()
199 return redirect('parlamentares:mesa_diretora')
200 else:
201 mesa = ComposicaoMesa.objects.filter(
202 sessao_legislativa=request.POST['sessao'])
203
204 cargos_ocupados = [m.cargo for m in mesa]
205 cargos = CargoMesa.objects.all()
206 cargos_vagos = list(set(cargos) - set(cargos_ocupados))
207
208 parlamentares = Legislatura.objects.get(
209 id=int(request.POST['legislatura'])).mandato_set.all()
210 parlamentares_ocupados = [m.parlamentar for m in mesa]
211 parlamentares_vagos = list(
212 set(
213 [p.parlamentar for p in parlamentares]) - set(
214 parlamentares_ocupados))
215 return self.render_to_response(
216 {'legislaturas': Legislatura.objects.all(
217 ).order_by('-data_inicio'),
218 'legislatura_selecionada': Legislatura.objects.get(
219 id=int(request.POST['legislatura'])),
220 'sessoes': SessaoLegislativa.objects.filter(
221 legislatura_id=int(request.POST['legislatura'])),
222 'sessao_selecionada': SessaoLegislativa.objects.get(
223 id=int(request.POST['sessao'])),
224 'composicao_mesa': mesa,
225 'parlamentares': parlamentares_vagos,
226 'cargos_vagos': cargos_vagos
227 })
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parlamentares/views.py b/parlamentares/views.py
--- a/parlamentares/views.py
+++ b/parlamentares/views.py
@@ -27,8 +27,14 @@
TipoMilitarCrud = Crud.build(SituacaoMilitar, 'tipo_situa_militar')
DependenteCrud = MasterDetailCrud.build(Dependente, 'parlamentar', '')
-MandatoCrud = MasterDetailCrud.build(Mandato, 'parlamentar', '')
+class MandatoCrud(MasterDetailCrud):
+ model = Mandato
+ parent_field = 'parlamentar'
+ help_path = ''
+
+ class ListView(MasterDetailCrud.ListView):
+ ordering = ('-data_fim_mandato')
class FiliacaoCrud(MasterDetailCrud):
model = Filiacao
| {"golden_diff": "diff --git a/parlamentares/views.py b/parlamentares/views.py\n--- a/parlamentares/views.py\n+++ b/parlamentares/views.py\n@@ -27,8 +27,14 @@\n TipoMilitarCrud = Crud.build(SituacaoMilitar, 'tipo_situa_militar')\n \n DependenteCrud = MasterDetailCrud.build(Dependente, 'parlamentar', '')\n-MandatoCrud = MasterDetailCrud.build(Mandato, 'parlamentar', '')\n \n+class MandatoCrud(MasterDetailCrud):\n+ model = Mandato\n+ parent_field = 'parlamentar'\n+ help_path = ''\n+\n+ class ListView(MasterDetailCrud.ListView):\n+ ordering = ('-data_fim_mandato')\n \n class FiliacaoCrud(MasterDetailCrud):\n model = Filiacao\n", "issue": "sapl 31: definir ordem de apresenta\u00e7\u00e3o dos mandatos do parlamentar\nNa fun\u00e7\u00e3o Parlamentar, definir a ordem de apresenta\u00e7\u00e3o dos mandatos do parlamentar. Penso que a ordem decrescente \u00e9 a adequada.\nExemplo: http://sapl31agudo.interlegis.leg.br/parlamentar/21/mandato\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import redirect\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import FormView\n\nimport crud.base\nimport crud.masterdetail\nfrom crud.base import Crud\nfrom crud.masterdetail import MasterDetailCrud\n\nfrom .forms import FiliacaoForm, ParlamentarCreateForm, ParlamentarForm\nfrom .models import (CargoMesa, Coligacao, ComposicaoMesa, Dependente,\n Filiacao, Legislatura, Mandato, NivelInstrucao,\n Parlamentar, Partido, SessaoLegislativa, SituacaoMilitar,\n TipoAfastamento, TipoDependente)\n\nCargoMesaCrud = Crud.build(CargoMesa, 'cargo_mesa')\nLegislaturaCrud = Crud.build(Legislatura, 'tabelas_auxiliares#legislatura')\nColigacaoCrud = Crud.build(Coligacao, 'coligacao')\nPartidoCrud = Crud.build(Partido, 'partidos')\nSessaoLegislativaCrud = Crud.build(SessaoLegislativa, 'sessao_legislativa')\nTipoDependenteCrud = Crud.build(TipoDependente, 'tipo_dependente')\nNivelInstrucaoCrud = Crud.build(NivelInstrucao, 'nivel_instrucao')\nTipoAfastamentoCrud = Crud.build(TipoAfastamento, 'tipo_afastamento')\nTipoMilitarCrud = Crud.build(SituacaoMilitar, 'tipo_situa_militar')\n\nDependenteCrud = MasterDetailCrud.build(Dependente, 'parlamentar', '')\nMandatoCrud = MasterDetailCrud.build(Mandato, 'parlamentar', '')\n\n\nclass FiliacaoCrud(MasterDetailCrud):\n model = Filiacao\n parent_field = 'parlamentar'\n help_path = ''\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = FiliacaoForm\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = FiliacaoForm\n\n class ListView(MasterDetailCrud.ListView):\n ordering = '-data'\n\n\nclass ParlamentarCrud(Crud):\n model = Parlamentar\n help_path = ''\n\n class UpdateView(crud.base.CrudUpdateView):\n form_class = ParlamentarForm\n\n class CreateView(crud.base.CrudCreateView):\n form_class = ParlamentarCreateForm\n\n @property\n def layout_key(self):\n return 'ParlamentarCreate'\n\n class ListView(crud.base.CrudListView):\n template_name = \"parlamentares/parlamentares_list.html\"\n paginate_by = None\n\n def take_legislatura_id(self):\n legislaturas = Legislatura.objects.all().order_by(\n '-data_inicio', '-data_fim')\n\n try:\n legislatura_id = int(self.request.GET['periodo'])\n except MultiValueDictKeyError:\n legislatura_id = legislaturas.first().id\n\n return legislatura_id\n\n def get_queryset(self):\n mandatos = Mandato.objects.filter(\n 
legislatura_id=self.take_legislatura_id())\n return mandatos\n\n def get_rows(self, object_list):\n parlamentares = []\n for m in object_list:\n ultima_filiacao = m.parlamentar.filiacao_set.\\\n order_by('-data').first()\n if ultima_filiacao and not ultima_filiacao.data_desfiliacao:\n partido = ultima_filiacao.partido.sigla\n else:\n partido = _('Sem Partido')\n\n parlamentar = [\n (m.parlamentar.nome_parlamentar, m.parlamentar.id),\n (partido, None),\n ('Sim' if m.parlamentar.ativo else 'N\u00e3o', None)\n ]\n parlamentares.append(parlamentar)\n return parlamentares\n\n def get_headers(self):\n return ['Parlamentar', 'Partido', 'Ativo?']\n\n def get_context_data(self, **kwargs):\n context = super(ParlamentarCrud.ListView, self\n ).get_context_data(**kwargs)\n context.setdefault('title', self.verbose_name_plural)\n\n # Adiciona legislatura para filtrar parlamentares\n legislaturas = Legislatura.objects.all().order_by(\n '-data_inicio', '-data_fim')\n context['legislaturas'] = legislaturas\n context['legislatura_id'] = self.take_legislatura_id()\n return context\n\n\nclass MesaDiretoraView(FormView):\n template_name = \"mesa_diretora/mesa_diretora.html\"\n success_url = reverse_lazy('parlamentares:mesa_diretora')\n\n # Essa fun\u00e7\u00e3o avisa quando se pode compor uma Mesa Legislativa)\n def validation(self, request):\n mensagem = _(\"N\u00e3o h\u00e1 nenhuma Sess\u00e3o Legislativa cadastrada. \\\n S\u00f3 \u00e9 poss\u00edvel compor uma Mesa Diretora quando h\u00e1 uma Sess\u00e3o \\\n Legislativa cadastrada.\")\n messages.add_message(request, messages.INFO, mensagem)\n\n return self.render_to_response(\n {'legislaturas': Legislatura.objects.all(\n ).order_by('-data_inicio'),\n 'legislatura_selecionada': Legislatura.objects.last(),\n 'cargos_vagos': CargoMesa.objects.all()})\n\n def get(self, request, *args, **kwargs):\n\n if (not Legislatura.objects.all() or\n not SessaoLegislativa.objects.all()):\n return self.validation(request)\n\n mesa = SessaoLegislativa.objects.filter(\n legislatura=Legislatura.objects.last()).first(\n ).composicaomesa_set.all()\n\n cargos_ocupados = [m.cargo for m in mesa]\n cargos = CargoMesa.objects.all()\n cargos_vagos = list(set(cargos) - set(cargos_ocupados))\n\n parlamentares = Legislatura.objects.last().mandato_set.all()\n parlamentares_ocupados = [m.parlamentar for m in mesa]\n parlamentares_vagos = list(\n set(\n [p.parlamentar for p in parlamentares]) - set(\n parlamentares_ocupados))\n\n return self.render_to_response(\n {'legislaturas': Legislatura.objects.all(\n ).order_by('-data_inicio'),\n 'legislatura_selecionada': Legislatura.objects.last(),\n 'sessoes': SessaoLegislativa.objects.filter(\n legislatura=Legislatura.objects.last()),\n 'sessao_selecionada': SessaoLegislativa.objects.filter(\n legislatura=Legislatura.objects.last()).first(),\n 'composicao_mesa': mesa,\n 'parlamentares': parlamentares_vagos,\n 'cargos_vagos': cargos_vagos\n })\n\n def post(self, request, *args, **kwargs):\n if 'Incluir' in request.POST:\n\n if (not Legislatura.objects.all() or\n not SessaoLegislativa.objects.all()):\n return self.validation(request)\n\n composicao = ComposicaoMesa()\n composicao.sessao_legislativa = SessaoLegislativa.objects.get(\n id=int(request.POST['sessao']))\n composicao.parlamentar = Parlamentar.objects.get(\n id=int(request.POST['parlamentar']))\n composicao.cargo = CargoMesa.objects.get(\n id=int(request.POST['cargo']))\n composicao.save()\n\n return redirect('parlamentares:mesa_diretora')\n\n elif 'Excluir' in request.POST:\n\n if (not 
Legislatura.objects.all() or\n not SessaoLegislativa.objects.all()):\n return self.validation(request)\n\n if 'composicao_mesa' in request.POST:\n ids = request.POST['composicao_mesa'].split(':')\n composicao = ComposicaoMesa.objects.get(\n sessao_legislativa_id=int(request.POST['sessao']),\n parlamentar_id=int(ids[0]),\n cargo_id=int(ids[1])\n )\n composicao.delete()\n return redirect('parlamentares:mesa_diretora')\n else:\n mesa = ComposicaoMesa.objects.filter(\n sessao_legislativa=request.POST['sessao'])\n\n cargos_ocupados = [m.cargo for m in mesa]\n cargos = CargoMesa.objects.all()\n cargos_vagos = list(set(cargos) - set(cargos_ocupados))\n\n parlamentares = Legislatura.objects.get(\n id=int(request.POST['legislatura'])).mandato_set.all()\n parlamentares_ocupados = [m.parlamentar for m in mesa]\n parlamentares_vagos = list(\n set(\n [p.parlamentar for p in parlamentares]) - set(\n parlamentares_ocupados))\n return self.render_to_response(\n {'legislaturas': Legislatura.objects.all(\n ).order_by('-data_inicio'),\n 'legislatura_selecionada': Legislatura.objects.get(\n id=int(request.POST['legislatura'])),\n 'sessoes': SessaoLegislativa.objects.filter(\n legislatura_id=int(request.POST['legislatura'])),\n 'sessao_selecionada': SessaoLegislativa.objects.get(\n id=int(request.POST['sessao'])),\n 'composicao_mesa': mesa,\n 'parlamentares': parlamentares_vagos,\n 'cargos_vagos': cargos_vagos\n })\n", "path": "parlamentares/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import redirect\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import FormView\n\nimport crud.base\nimport crud.masterdetail\nfrom crud.base import Crud\nfrom crud.masterdetail import MasterDetailCrud\n\nfrom .forms import FiliacaoForm, ParlamentarCreateForm, ParlamentarForm\nfrom .models import (CargoMesa, Coligacao, ComposicaoMesa, Dependente,\n Filiacao, Legislatura, Mandato, NivelInstrucao,\n Parlamentar, Partido, SessaoLegislativa, SituacaoMilitar,\n TipoAfastamento, TipoDependente)\n\nCargoMesaCrud = Crud.build(CargoMesa, 'cargo_mesa')\nLegislaturaCrud = Crud.build(Legislatura, 'tabelas_auxiliares#legislatura')\nColigacaoCrud = Crud.build(Coligacao, 'coligacao')\nPartidoCrud = Crud.build(Partido, 'partidos')\nSessaoLegislativaCrud = Crud.build(SessaoLegislativa, 'sessao_legislativa')\nTipoDependenteCrud = Crud.build(TipoDependente, 'tipo_dependente')\nNivelInstrucaoCrud = Crud.build(NivelInstrucao, 'nivel_instrucao')\nTipoAfastamentoCrud = Crud.build(TipoAfastamento, 'tipo_afastamento')\nTipoMilitarCrud = Crud.build(SituacaoMilitar, 'tipo_situa_militar')\n\nDependenteCrud = MasterDetailCrud.build(Dependente, 'parlamentar', '')\n\nclass MandatoCrud(MasterDetailCrud):\n model = Mandato\n parent_field = 'parlamentar'\n help_path = ''\n\n class ListView(MasterDetailCrud.ListView):\n ordering = ('-data_fim_mandato')\n\nclass FiliacaoCrud(MasterDetailCrud):\n model = Filiacao\n parent_field = 'parlamentar'\n help_path = ''\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = FiliacaoForm\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = FiliacaoForm\n\n class ListView(MasterDetailCrud.ListView):\n ordering = '-data'\n\n\nclass ParlamentarCrud(Crud):\n model = Parlamentar\n help_path = ''\n\n class UpdateView(crud.base.CrudUpdateView):\n form_class = ParlamentarForm\n\n class 
CreateView(crud.base.CrudCreateView):\n form_class = ParlamentarCreateForm\n\n @property\n def layout_key(self):\n return 'ParlamentarCreate'\n\n class ListView(crud.base.CrudListView):\n template_name = \"parlamentares/parlamentares_list.html\"\n paginate_by = None\n\n def take_legislatura_id(self):\n legislaturas = Legislatura.objects.all().order_by(\n '-data_inicio', '-data_fim')\n\n try:\n legislatura_id = int(self.request.GET['periodo'])\n except MultiValueDictKeyError:\n legislatura_id = legislaturas.first().id\n\n return legislatura_id\n\n def get_queryset(self):\n mandatos = Mandato.objects.filter(\n legislatura_id=self.take_legislatura_id())\n return mandatos\n\n def get_rows(self, object_list):\n parlamentares = []\n for m in object_list:\n ultima_filiacao = m.parlamentar.filiacao_set.\\\n order_by('-data').first()\n if ultima_filiacao and not ultima_filiacao.data_desfiliacao:\n partido = ultima_filiacao.partido.sigla\n else:\n partido = _('Sem Partido')\n\n parlamentar = [\n (m.parlamentar.nome_parlamentar, m.parlamentar.id),\n (partido, None),\n ('Sim' if m.parlamentar.ativo else 'N\u00e3o', None)\n ]\n parlamentares.append(parlamentar)\n return parlamentares\n\n def get_headers(self):\n return ['Parlamentar', 'Partido', 'Ativo?']\n\n def get_context_data(self, **kwargs):\n context = super(ParlamentarCrud.ListView, self\n ).get_context_data(**kwargs)\n context.setdefault('title', self.verbose_name_plural)\n\n # Adiciona legislatura para filtrar parlamentares\n legislaturas = Legislatura.objects.all().order_by(\n '-data_inicio', '-data_fim')\n context['legislaturas'] = legislaturas\n context['legislatura_id'] = self.take_legislatura_id()\n return context\n\n\nclass MesaDiretoraView(FormView):\n template_name = \"mesa_diretora/mesa_diretora.html\"\n success_url = reverse_lazy('parlamentares:mesa_diretora')\n\n # Essa fun\u00e7\u00e3o avisa quando se pode compor uma Mesa Legislativa)\n def validation(self, request):\n mensagem = _(\"N\u00e3o h\u00e1 nenhuma Sess\u00e3o Legislativa cadastrada. 
\\\n S\u00f3 \u00e9 poss\u00edvel compor uma Mesa Diretora quando h\u00e1 uma Sess\u00e3o \\\n Legislativa cadastrada.\")\n messages.add_message(request, messages.INFO, mensagem)\n\n return self.render_to_response(\n {'legislaturas': Legislatura.objects.all(\n ).order_by('-data_inicio'),\n 'legislatura_selecionada': Legislatura.objects.last(),\n 'cargos_vagos': CargoMesa.objects.all()})\n\n def get(self, request, *args, **kwargs):\n\n if (not Legislatura.objects.all() or\n not SessaoLegislativa.objects.all()):\n return self.validation(request)\n\n mesa = SessaoLegislativa.objects.filter(\n legislatura=Legislatura.objects.last()).first(\n ).composicaomesa_set.all()\n\n cargos_ocupados = [m.cargo for m in mesa]\n cargos = CargoMesa.objects.all()\n cargos_vagos = list(set(cargos) - set(cargos_ocupados))\n\n parlamentares = Legislatura.objects.last().mandato_set.all()\n parlamentares_ocupados = [m.parlamentar for m in mesa]\n parlamentares_vagos = list(\n set(\n [p.parlamentar for p in parlamentares]) - set(\n parlamentares_ocupados))\n\n return self.render_to_response(\n {'legislaturas': Legislatura.objects.all(\n ).order_by('-data_inicio'),\n 'legislatura_selecionada': Legislatura.objects.last(),\n 'sessoes': SessaoLegislativa.objects.filter(\n legislatura=Legislatura.objects.last()),\n 'sessao_selecionada': SessaoLegislativa.objects.filter(\n legislatura=Legislatura.objects.last()).first(),\n 'composicao_mesa': mesa,\n 'parlamentares': parlamentares_vagos,\n 'cargos_vagos': cargos_vagos\n })\n\n def post(self, request, *args, **kwargs):\n if 'Incluir' in request.POST:\n\n if (not Legislatura.objects.all() or\n not SessaoLegislativa.objects.all()):\n return self.validation(request)\n\n composicao = ComposicaoMesa()\n composicao.sessao_legislativa = SessaoLegislativa.objects.get(\n id=int(request.POST['sessao']))\n composicao.parlamentar = Parlamentar.objects.get(\n id=int(request.POST['parlamentar']))\n composicao.cargo = CargoMesa.objects.get(\n id=int(request.POST['cargo']))\n composicao.save()\n\n return redirect('parlamentares:mesa_diretora')\n\n elif 'Excluir' in request.POST:\n\n if (not Legislatura.objects.all() or\n not SessaoLegislativa.objects.all()):\n return self.validation(request)\n\n if 'composicao_mesa' in request.POST:\n ids = request.POST['composicao_mesa'].split(':')\n composicao = ComposicaoMesa.objects.get(\n sessao_legislativa_id=int(request.POST['sessao']),\n parlamentar_id=int(ids[0]),\n cargo_id=int(ids[1])\n )\n composicao.delete()\n return redirect('parlamentares:mesa_diretora')\n else:\n mesa = ComposicaoMesa.objects.filter(\n sessao_legislativa=request.POST['sessao'])\n\n cargos_ocupados = [m.cargo for m in mesa]\n cargos = CargoMesa.objects.all()\n cargos_vagos = list(set(cargos) - set(cargos_ocupados))\n\n parlamentares = Legislatura.objects.get(\n id=int(request.POST['legislatura'])).mandato_set.all()\n parlamentares_ocupados = [m.parlamentar for m in mesa]\n parlamentares_vagos = list(\n set(\n [p.parlamentar for p in parlamentares]) - set(\n parlamentares_ocupados))\n return self.render_to_response(\n {'legislaturas': Legislatura.objects.all(\n ).order_by('-data_inicio'),\n 'legislatura_selecionada': Legislatura.objects.get(\n id=int(request.POST['legislatura'])),\n 'sessoes': SessaoLegislativa.objects.filter(\n legislatura_id=int(request.POST['legislatura'])),\n 'sessao_selecionada': SessaoLegislativa.objects.get(\n id=int(request.POST['sessao'])),\n 'composicao_mesa': mesa,\n 'parlamentares': parlamentares_vagos,\n 'cargos_vagos': cargos_vagos\n })\n", 
"path": "parlamentares/views.py"}]} | 3,064 | 194 |
gh_patches_debug_22326 | rasdani/github-patches | git_diff | deeppavlov__DeepPavlov-798 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error while training model with config "ner_conll2003_pos"
I'm trying to train a NER model using "train_model(configs.ner.ner_conll2003_pos)" on Colab. There are only three things I've changed in the original ner_conll2003_pos.json file: number of epochs = 1, DOWNLOADS_PATH and MODELS_PATH. After I start, it terminates with this error:
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-5-683c58afa1f4> in <module>()
1 from deeppavlov import configs, train_model
----> 2 ner_model = train_model(configs.ner.ner_conll2003_pos)
/usr/local/lib/python3.6/dist-packages/deeppavlov/__init__.py in train_model(config, download, recursive)
29 # TODO: make better
30 def train_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> Chainer:
---> 31 train_evaluate_model_from_config(config, download=download, recursive=recursive)
32 return build_model(config, load_trained=True)
33
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/commands/train.py in train_evaluate_model_from_config(config, iterator, to_train, evaluation_targets, to_validate, download, start_epoch_num, recursive)
119
120 if to_train:
--> 121 trainer.train(iterator)
122
123 res = {}
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in train(self, iterator)
292 if callable(getattr(self._chainer, 'train_on_batch', None)):
293 try:
--> 294 self.train_on_batches(iterator)
295 except KeyboardInterrupt:
296 log.info('Stopped training')
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in train_on_batches(self, iterator)
232 self.start_time = time.time()
233 if self.validate_first:
--> 234 self._validate(iterator)
235
236 while True:
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in _validate(self, iterator, tensorboard_tag, tensorboard_index)
142 self._send_event(event_name='before_validation')
143 report = self.test(iterator.gen_batches(self.batch_size, data_type='valid', shuffle=False),
--> 144 start_time=self.start_time)
145
146 report['epochs_done'] = self.epoch
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/fit_trainer.py in test(self, data, metrics, start_time, show_examples)
204 for x, y_true in data:
205 examples += len(x)
--> 206 y_predicted = list(self._chainer.compute(list(x), list(y_true), targets=expected_outputs))
207 if len(expected_outputs) == 1:
208 y_predicted = [y_predicted]
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/common/chainer.py in compute(self, x, y, targets)
141 in_params += self.in_y
142
--> 143 return self._compute(*args, pipe=pipe, param_names=in_params, targets=targets)
144
145 def __call__(self, *args):
/usr/local/lib/python3.6/dist-packages/deeppavlov/core/common/chainer.py in _compute(***failed resolving arguments***)
167 res = component(**dict(zip(in_keys, x)))
168 else:
--> 169 res = component(*x)
170 if len(out_params) == 1:
171 mem[out_params[0]] = res
/usr/local/lib/python3.6/dist-packages/deeppavlov/models/preprocessors/one_hotter.py in __call__(self, batch, **kwargs)
68 one_hotted_utt = np.sum(one_hotted_utt, axis=0)
69
---> 70 one_hotted_batch.append(one_hotted_utt)
71
72 if self._pad_zeros:
UnboundLocalError: local variable 'one_hotted_utt' referenced before assignment
-----------------------------------------------------------------------------------------------
How can I fix this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deeppavlov/models/preprocessors/one_hotter.py`
Content:
```
1 # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import List, Union
16
17 import numpy as np
18
19 from deeppavlov.core.common.errors import ConfigError
20 from deeppavlov.core.common.registry import register
21 from deeppavlov.core.data.utils import zero_pad
22 from deeppavlov.core.models.component import Component
23
24
25 @register('one_hotter')
26 class OneHotter(Component):
27 """
28 One-hot featurizer with zero-padding.
29 If ``single_vector``, return the only vector per sample which can have several elements equal to ``1``.
30
31 Parameters:
32 depth: the depth for one-hotting
33 pad_zeros: whether to pad elements of batch with zeros
34 single_vector: whether to return one vector for the sample (sum of each one-hotted vectors)
35 """
36 def __init__(self, depth: int, pad_zeros: bool = False,
37 single_vector=False, *args, **kwargs):
38 self._depth = depth
39 self._pad_zeros = pad_zeros
40 self.single_vector = single_vector
41 if self._pad_zeros and self.single_vector:
42 raise ConfigError("Cannot perform ``single_vector`` with zero padding for OneHotter")
43
44 def __call__(self, batch: List[List[int]], **kwargs) -> Union[List[List[np.ndarray]], List[np.ndarray]]:
45 """
46 Convert given batch of list of labels to one-hot representation of the batch.
47
48 Args:
49 batch: list of samples, where each sample is a list of integer labels.
50 **kwargs: additional arguments
51
52 Returns:
53 if ``single_vector``, list of one-hot representations of each sample,
54 otherwise, list of lists of one-hot representations of each label in a sample
55 """
56 one_hotted_batch = []
57
58 for utt in batch:
59 if isinstance(utt, list):
60 one_hotted_utt = self._to_one_hot(utt, self._depth)
61 elif isinstance(utt, int):
62 if self._pad_zeros or self.single_vector:
63 one_hotted_utt = self._to_one_hot([utt], self._depth)
64 else:
65 one_hotted_utt = self._to_one_hot([utt], self._depth).reshape(-1)
66
67 if self.single_vector:
68 one_hotted_utt = np.sum(one_hotted_utt, axis=0)
69
70 one_hotted_batch.append(one_hotted_utt)
71
72 if self._pad_zeros:
73 one_hotted_batch = zero_pad(one_hotted_batch)
74 return one_hotted_batch
75
76 @staticmethod
77 def _to_one_hot(x, n):
78 b = np.zeros([len(x), n], dtype=np.float32)
79 for q, tok in enumerate(x):
80 b[q, tok] = 1
81 return b
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deeppavlov/models/preprocessors/one_hotter.py b/deeppavlov/models/preprocessors/one_hotter.py
--- a/deeppavlov/models/preprocessors/one_hotter.py
+++ b/deeppavlov/models/preprocessors/one_hotter.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Union
+from typing import List, Union, Iterable
import numpy as np
@@ -56,7 +56,7 @@
one_hotted_batch = []
for utt in batch:
- if isinstance(utt, list):
+ if isinstance(utt, Iterable):
one_hotted_utt = self._to_one_hot(utt, self._depth)
elif isinstance(utt, int):
if self._pad_zeros or self.single_vector:
@@ -77,5 +77,5 @@
def _to_one_hot(x, n):
b = np.zeros([len(x), n], dtype=np.float32)
for q, tok in enumerate(x):
- b[q, tok] = 1
+ b[q, int(tok)] = 1
return b
| {"golden_diff": "diff --git a/deeppavlov/models/preprocessors/one_hotter.py b/deeppavlov/models/preprocessors/one_hotter.py\n--- a/deeppavlov/models/preprocessors/one_hotter.py\n+++ b/deeppavlov/models/preprocessors/one_hotter.py\n@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from typing import List, Union\n+from typing import List, Union, Iterable\n \n import numpy as np\n \n@@ -56,7 +56,7 @@\n one_hotted_batch = []\n \n for utt in batch:\n- if isinstance(utt, list):\n+ if isinstance(utt, Iterable):\n one_hotted_utt = self._to_one_hot(utt, self._depth)\n elif isinstance(utt, int):\n if self._pad_zeros or self.single_vector:\n@@ -77,5 +77,5 @@\n def _to_one_hot(x, n):\n b = np.zeros([len(x), n], dtype=np.float32)\n for q, tok in enumerate(x):\n- b[q, tok] = 1\n+ b[q, int(tok)] = 1\n return b\n", "issue": "Error while training model with config \"ner_conll2003_pos\"\nI'm trying to train a NER model using \"train_model(configs.ner.ner_conll2003_pos)\" on Colab. There are only three things I've changed in original ner_conll2003_pos.json file: number of epochs = 1, DOWNLOADS_PATH and MODELS_PATH. After I start, it terminates with this error:\r\n\r\n---------------------------------------------------------------------------\r\nUnboundLocalError Traceback (most recent call last)\r\n<ipython-input-5-683c58afa1f4> in <module>()\r\n 1 from deeppavlov import configs, train_model\r\n----> 2 ner_model = train_model(configs.ner.ner_conll2003_pos)\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/__init__.py in train_model(config, download, recursive)\r\n 29 # TODO: make better\r\n 30 def train_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> Chainer:\r\n---> 31 train_evaluate_model_from_config(config, download=download, recursive=recursive)\r\n 32 return build_model(config, load_trained=True)\r\n 33 \r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/commands/train.py in train_evaluate_model_from_config(config, iterator, to_train, evaluation_targets, to_validate, download, start_epoch_num, recursive)\r\n 119 \r\n 120 if to_train:\r\n--> 121 trainer.train(iterator)\r\n 122 \r\n 123 res = {}\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in train(self, iterator)\r\n 292 if callable(getattr(self._chainer, 'train_on_batch', None)):\r\n 293 try:\r\n--> 294 self.train_on_batches(iterator)\r\n 295 except KeyboardInterrupt:\r\n 296 log.info('Stopped training')\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in train_on_batches(self, iterator)\r\n 232 self.start_time = time.time()\r\n 233 if self.validate_first:\r\n--> 234 self._validate(iterator)\r\n 235 \r\n 236 while True:\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/nn_trainer.py in _validate(self, iterator, tensorboard_tag, tensorboard_index)\r\n 142 self._send_event(event_name='before_validation')\r\n 143 report = self.test(iterator.gen_batches(self.batch_size, data_type='valid', shuffle=False),\r\n--> 144 start_time=self.start_time)\r\n 145 \r\n 146 report['epochs_done'] = self.epoch\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/trainers/fit_trainer.py in test(self, data, metrics, start_time, show_examples)\r\n 204 for x, y_true in data:\r\n 205 examples += len(x)\r\n--> 206 y_predicted = list(self._chainer.compute(list(x), list(y_true), targets=expected_outputs))\r\n 207 if len(expected_outputs) == 
1:\r\n 208 y_predicted = [y_predicted]\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/common/chainer.py in compute(self, x, y, targets)\r\n 141 in_params += self.in_y\r\n 142 \r\n--> 143 return self._compute(*args, pipe=pipe, param_names=in_params, targets=targets)\r\n 144 \r\n 145 def __call__(self, *args):\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/core/common/chainer.py in _compute(***failed resolving arguments***)\r\n 167 res = component(**dict(zip(in_keys, x)))\r\n 168 else:\r\n--> 169 res = component(*x)\r\n 170 if len(out_params) == 1:\r\n 171 mem[out_params[0]] = res\r\n\r\n/usr/local/lib/python3.6/dist-packages/deeppavlov/models/preprocessors/one_hotter.py in __call__(self, batch, **kwargs)\r\n 68 one_hotted_utt = np.sum(one_hotted_utt, axis=0)\r\n 69 \r\n---> 70 one_hotted_batch.append(one_hotted_utt)\r\n 71 \r\n 72 if self._pad_zeros:\r\n\r\nUnboundLocalError: local variable 'one_hotted_utt' referenced before assignment\r\n-----------------------------------------------------------------------------------------------\r\n\r\nHow can I fix this? \n", "before_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Union\n\nimport numpy as np\n\nfrom deeppavlov.core.common.errors import ConfigError\nfrom deeppavlov.core.common.registry import register\nfrom deeppavlov.core.data.utils import zero_pad\nfrom deeppavlov.core.models.component import Component\n\n\n@register('one_hotter')\nclass OneHotter(Component):\n \"\"\"\n One-hot featurizer with zero-padding.\n If ``single_vector``, return the only vector per sample which can have several elements equal to ``1``.\n\n Parameters:\n depth: the depth for one-hotting\n pad_zeros: whether to pad elements of batch with zeros\n single_vector: whether to return one vector for the sample (sum of each one-hotted vectors)\n \"\"\"\n def __init__(self, depth: int, pad_zeros: bool = False,\n single_vector=False, *args, **kwargs):\n self._depth = depth\n self._pad_zeros = pad_zeros\n self.single_vector = single_vector\n if self._pad_zeros and self.single_vector:\n raise ConfigError(\"Cannot perform ``single_vector`` with zero padding for OneHotter\")\n\n def __call__(self, batch: List[List[int]], **kwargs) -> Union[List[List[np.ndarray]], List[np.ndarray]]:\n \"\"\"\n Convert given batch of list of labels to one-hot representation of the batch.\n\n Args:\n batch: list of samples, where each sample is a list of integer labels.\n **kwargs: additional arguments\n\n Returns:\n if ``single_vector``, list of one-hot representations of each sample,\n otherwise, list of lists of one-hot representations of each label in a sample\n \"\"\"\n one_hotted_batch = []\n\n for utt in batch:\n if isinstance(utt, list):\n one_hotted_utt = self._to_one_hot(utt, self._depth)\n elif isinstance(utt, int):\n if self._pad_zeros or self.single_vector:\n one_hotted_utt = self._to_one_hot([utt], self._depth)\n else:\n one_hotted_utt = 
self._to_one_hot([utt], self._depth).reshape(-1)\n\n if self.single_vector:\n one_hotted_utt = np.sum(one_hotted_utt, axis=0)\n\n one_hotted_batch.append(one_hotted_utt)\n\n if self._pad_zeros:\n one_hotted_batch = zero_pad(one_hotted_batch)\n return one_hotted_batch\n\n @staticmethod\n def _to_one_hot(x, n):\n b = np.zeros([len(x), n], dtype=np.float32)\n for q, tok in enumerate(x):\n b[q, tok] = 1\n return b\n", "path": "deeppavlov/models/preprocessors/one_hotter.py"}], "after_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Union, Iterable\n\nimport numpy as np\n\nfrom deeppavlov.core.common.errors import ConfigError\nfrom deeppavlov.core.common.registry import register\nfrom deeppavlov.core.data.utils import zero_pad\nfrom deeppavlov.core.models.component import Component\n\n\n@register('one_hotter')\nclass OneHotter(Component):\n \"\"\"\n One-hot featurizer with zero-padding.\n If ``single_vector``, return the only vector per sample which can have several elements equal to ``1``.\n\n Parameters:\n depth: the depth for one-hotting\n pad_zeros: whether to pad elements of batch with zeros\n single_vector: whether to return one vector for the sample (sum of each one-hotted vectors)\n \"\"\"\n def __init__(self, depth: int, pad_zeros: bool = False,\n single_vector=False, *args, **kwargs):\n self._depth = depth\n self._pad_zeros = pad_zeros\n self.single_vector = single_vector\n if self._pad_zeros and self.single_vector:\n raise ConfigError(\"Cannot perform ``single_vector`` with zero padding for OneHotter\")\n\n def __call__(self, batch: List[List[int]], **kwargs) -> Union[List[List[np.ndarray]], List[np.ndarray]]:\n \"\"\"\n Convert given batch of list of labels to one-hot representation of the batch.\n\n Args:\n batch: list of samples, where each sample is a list of integer labels.\n **kwargs: additional arguments\n\n Returns:\n if ``single_vector``, list of one-hot representations of each sample,\n otherwise, list of lists of one-hot representations of each label in a sample\n \"\"\"\n one_hotted_batch = []\n\n for utt in batch:\n if isinstance(utt, Iterable):\n one_hotted_utt = self._to_one_hot(utt, self._depth)\n elif isinstance(utt, int):\n if self._pad_zeros or self.single_vector:\n one_hotted_utt = self._to_one_hot([utt], self._depth)\n else:\n one_hotted_utt = self._to_one_hot([utt], self._depth).reshape(-1)\n\n if self.single_vector:\n one_hotted_utt = np.sum(one_hotted_utt, axis=0)\n\n one_hotted_batch.append(one_hotted_utt)\n\n if self._pad_zeros:\n one_hotted_batch = zero_pad(one_hotted_batch)\n return one_hotted_batch\n\n @staticmethod\n def _to_one_hot(x, n):\n b = np.zeros([len(x), n], dtype=np.float32)\n for q, tok in enumerate(x):\n b[q, int(tok)] = 1\n return b\n", "path": "deeppavlov/models/preprocessors/one_hotter.py"}]} | 2,223 | 275 |
gh_patches_debug_43152 | rasdani/github-patches | git_diff | sktime__sktime-3561 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] `CNNRegressor` missing essential initializing parameters
**Describe the bug**
<!--
A clear and concise description of what the bug is.
-->
`CNNRegressor` misses parameters like `optimizer` in its constructor. There may be other settings that don't coincide with `CNNClassifier` and should be added as well, but since I'm not sure exactly what is missing, I'm opening this issue to bring it to notice.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/regression/deep_learning/cnn.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Time Convolutional Neural Network (CNN) for regression."""
3
4 __author__ = ["AurumnPegasus"]
5 __all__ = ["CNNRegressor"]
6
7 from sktime.networks.cnn import CNNNetwork
8 from sktime.regression.deep_learning.base import BaseDeepRegressor
9 from sktime.utils.validation._dependencies import _check_dl_dependencies
10
11 _check_dl_dependencies(severity="warning")
12
13
14 class CNNRegressor(BaseDeepRegressor):
15 """Time Convolutional Neural Network (CNN), as described in [1].
16
17 Parameters
18 ----------
19 should inherited fields be listed here?
20 n_epochs : int, default = 2000
21 the number of epochs to train the model
22 batch_size : int, default = 16
23 the number of samples per gradient update.
24 kernel_size : int, default = 7
25 the length of the 1D convolution window
26 avg_pool_size : int, default = 3
27 size of the average pooling windows
28 n_conv_layers : int, default = 2
29 the number of convolutional plus average pooling layers
30 filter_sizes : array of shape (n_conv_layers) default = [6, 12]
31 random_state : int or None, default=None
32 Seed for random number generation.
33 verbose : boolean, default = False
34 whether to output extra information
35 loss : string, default="mean_squared_error"
36 fit parameter for the keras model
37 optimizer : keras.optimizer, default=keras.optimizers.Adam(),
38 metrics : list of strings, default=["accuracy"],
39
40 Notes
41 -----
42 .. [1] Zhao et. al, Convolutional neural networks for
43 time series classification, Journal of
44 Systems Engineering and Electronics, 28(1):2017.
45
46 Adapted from the implementation from Fawaz et. al
47 https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/cnn.py
48 """
49
50 def __init__(
51 self,
52 n_epochs=2000,
53 batch_size=16,
54 kernel_size=7,
55 avg_pool_size=3,
56 n_conv_layers=2,
57 callbacks=None,
58 verbose=False,
59 loss="mean_squared_error",
60 metrics=None,
61 random_seed=0,
62 ):
63 _check_dl_dependencies(severity="error")
64 super(CNNRegressor, self).__init__(
65 batch_size=batch_size,
66 )
67 self.n_conv_layers = n_conv_layers
68 self.avg_pool_size = avg_pool_size
69 self.kernel_size = kernel_size
70 self.callbacks = callbacks
71 self.n_epochs = n_epochs
72 self.batch_size = batch_size
73 self.verbose = verbose
74 self.loss = loss
75 self.metrics = metrics
76 self.random_seed = random_seed
77 self._network = CNNNetwork()
78
79 def build_model(self, input_shape, **kwargs):
80 """Construct a compiled, un-trained, keras model that is ready for training.
81
82 In sktime, time series are stored in numpy arrays of shape (d,m), where d
83 is the number of dimensions, m is the series length. Keras/tensorflow assume
84 data is in shape (m,d). This method also assumes (m,d). Transpose should
85 happen in fit.
86
87 Parameters
88 ----------
89 input_shape : tuple
90 The shape of the data fed into the input layer, should be (m,d)
91
92 Returns
93 -------
94 output : a compiled Keras Model
95 """
96 import tensorflow as tf
97 from tensorflow import keras
98
99 tf.random.set_seed(self.random_seed)
100
101 if self.metrics is None:
102 metrics = ["accuracy"]
103 else:
104 metrics = self.metrics
105
106 input_layer, output_layer = self._network.build_network(input_shape, **kwargs)
107
108 output_layer = keras.layers.Dense(units=1, activation="sigmoid")(output_layer)
109
110 model = keras.models.Model(inputs=input_layer, outputs=output_layer)
111
112 model.compile(
113 loss=self.loss,
114 optimizer=keras.optimizers.Adam(),
115 metrics=metrics,
116 )
117 return model
118
119 def _fit(self, X, y):
120 """Fit the classifier on the training set (X, y).
121
122 Parameters
123 ----------
124 X : np.ndarray of shape = (n_instances (n), n_dimensions (d), series_length (m))
125 The training input samples.
126 y : np.ndarray of shape n
127 The training data class labels.
128
129 Returns
130 -------
131 self : object
132 """
133 if self.callbacks is None:
134 self._callbacks = []
135
136 # Transpose to conform to Keras input style.
137 X = X.transpose(0, 2, 1)
138
139 self.input_shape = X.shape[1:]
140 self.model_ = self.build_model(self.input_shape)
141 if self.verbose:
142 self.model.summary()
143
144 self.history = self.model_.fit(
145 X,
146 y,
147 batch_size=self.batch_size,
148 epochs=self.n_epochs,
149 verbose=self.verbose,
150 callbacks=self._callbacks,
151 )
152 return self
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/regression/deep_learning/cnn.py b/sktime/regression/deep_learning/cnn.py
--- a/sktime/regression/deep_learning/cnn.py
+++ b/sktime/regression/deep_learning/cnn.py
@@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
"""Time Convolutional Neural Network (CNN) for regression."""
-__author__ = ["AurumnPegasus"]
+__author__ = ["AurumnPegasus", "achieveordie"]
__all__ = ["CNNRegressor"]
+from sklearn.utils import check_random_state
+
from sktime.networks.cnn import CNNNetwork
from sktime.regression.deep_learning.base import BaseDeepRegressor
from sktime.utils.validation._dependencies import _check_dl_dependencies
@@ -34,7 +36,12 @@
whether to output extra information
loss : string, default="mean_squared_error"
fit parameter for the keras model
- optimizer : keras.optimizer, default=keras.optimizers.Adam(),
+ activation : keras.activations or string, default `linear`
+ function to use in the output layer.
+ optimizer : keras.optimizers or string, default `None`.
+ when `None`, internally uses `keras.optimizers.Adam(0.01)`
+ use_bias : bool, default=True
+ whether to use bias in the output layer.
metrics : list of strings, default=["accuracy"],
Notes
@@ -58,7 +65,10 @@
verbose=False,
loss="mean_squared_error",
metrics=None,
- random_seed=0,
+ random_state=0,
+ activation="linear",
+ use_bias=True,
+ optimizer=None,
):
_check_dl_dependencies(severity="error")
super(CNNRegressor, self).__init__(
@@ -73,7 +83,11 @@
self.verbose = verbose
self.loss = loss
self.metrics = metrics
- self.random_seed = random_seed
+ self.random_state = random_state
+ self.activation = activation
+ self.use_bias = use_bias
+ self.optimizer = optimizer
+ self.history = None
self._network = CNNNetwork()
def build_model(self, input_shape, **kwargs):
@@ -96,7 +110,7 @@
import tensorflow as tf
from tensorflow import keras
- tf.random.set_seed(self.random_seed)
+ tf.random.set_seed(self.random_state)
if self.metrics is None:
metrics = ["accuracy"]
@@ -105,13 +119,23 @@
input_layer, output_layer = self._network.build_network(input_shape, **kwargs)
- output_layer = keras.layers.Dense(units=1, activation="sigmoid")(output_layer)
+ output_layer = keras.layers.Dense(
+ units=1,
+ activation=self.activation,
+ use_bias=self.use_bias,
+ )(output_layer)
+
+ self.optimizer_ = (
+ keras.optimizers.Adam(learning_rate=0.01)
+ if self.optimizer is None
+ else self.optimizer
+ )
model = keras.models.Model(inputs=input_layer, outputs=output_layer)
model.compile(
loss=self.loss,
- optimizer=keras.optimizers.Adam(),
+ optimizer=self.optimizer_,
metrics=metrics,
)
return model
@@ -136,6 +160,7 @@
# Transpose to conform to Keras input style.
X = X.transpose(0, 2, 1)
+ check_random_state(self.random_state)
self.input_shape = X.shape[1:]
self.model_ = self.build_model(self.input_shape)
if self.verbose:
| {"golden_diff": "diff --git a/sktime/regression/deep_learning/cnn.py b/sktime/regression/deep_learning/cnn.py\n--- a/sktime/regression/deep_learning/cnn.py\n+++ b/sktime/regression/deep_learning/cnn.py\n@@ -1,9 +1,11 @@\n # -*- coding: utf-8 -*-\n \"\"\"Time Convolutional Neural Network (CNN) for regression.\"\"\"\n \n-__author__ = [\"AurumnPegasus\"]\n+__author__ = [\"AurumnPegasus\", \"achieveordie\"]\n __all__ = [\"CNNRegressor\"]\n \n+from sklearn.utils import check_random_state\n+\n from sktime.networks.cnn import CNNNetwork\n from sktime.regression.deep_learning.base import BaseDeepRegressor\n from sktime.utils.validation._dependencies import _check_dl_dependencies\n@@ -34,7 +36,12 @@\n whether to output extra information\n loss : string, default=\"mean_squared_error\"\n fit parameter for the keras model\n- optimizer : keras.optimizer, default=keras.optimizers.Adam(),\n+ activation : keras.activations or string, default `linear`\n+ function to use in the output layer.\n+ optimizer : keras.optimizers or string, default `None`.\n+ when `None`, internally uses `keras.optimizers.Adam(0.01)`\n+ use_bias : bool, default=True\n+ whether to use bias in the output layer.\n metrics : list of strings, default=[\"accuracy\"],\n \n Notes\n@@ -58,7 +65,10 @@\n verbose=False,\n loss=\"mean_squared_error\",\n metrics=None,\n- random_seed=0,\n+ random_state=0,\n+ activation=\"linear\",\n+ use_bias=True,\n+ optimizer=None,\n ):\n _check_dl_dependencies(severity=\"error\")\n super(CNNRegressor, self).__init__(\n@@ -73,7 +83,11 @@\n self.verbose = verbose\n self.loss = loss\n self.metrics = metrics\n- self.random_seed = random_seed\n+ self.random_state = random_state\n+ self.activation = activation\n+ self.use_bias = use_bias\n+ self.optimizer = optimizer\n+ self.history = None\n self._network = CNNNetwork()\n \n def build_model(self, input_shape, **kwargs):\n@@ -96,7 +110,7 @@\n import tensorflow as tf\n from tensorflow import keras\n \n- tf.random.set_seed(self.random_seed)\n+ tf.random.set_seed(self.random_state)\n \n if self.metrics is None:\n metrics = [\"accuracy\"]\n@@ -105,13 +119,23 @@\n \n input_layer, output_layer = self._network.build_network(input_shape, **kwargs)\n \n- output_layer = keras.layers.Dense(units=1, activation=\"sigmoid\")(output_layer)\n+ output_layer = keras.layers.Dense(\n+ units=1,\n+ activation=self.activation,\n+ use_bias=self.use_bias,\n+ )(output_layer)\n+\n+ self.optimizer_ = (\n+ keras.optimizers.Adam(learning_rate=0.01)\n+ if self.optimizer is None\n+ else self.optimizer\n+ )\n \n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n \n model.compile(\n loss=self.loss,\n- optimizer=keras.optimizers.Adam(),\n+ optimizer=self.optimizer_,\n metrics=metrics,\n )\n return model\n@@ -136,6 +160,7 @@\n # Transpose to conform to Keras input style.\n X = X.transpose(0, 2, 1)\n \n+ check_random_state(self.random_state)\n self.input_shape = X.shape[1:]\n self.model_ = self.build_model(self.input_shape)\n if self.verbose:\n", "issue": "[BUG] `CNNRegressor` missing essential initializing parameters\n**Describe the bug**\r\n<!--\r\nA clear and concise description of what the bug is.\r\n-->\r\n`CNNRegressor` misses parameters like `optimizer` in its constructor. 
There might be more things which don't coincide with `CNNClassifier` which should be added but since I'm not sure what all things might be missing, I'm opening this issue to bring it to notice.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Time Convolutional Neural Network (CNN) for regression.\"\"\"\n\n__author__ = [\"AurumnPegasus\"]\n__all__ = [\"CNNRegressor\"]\n\nfrom sktime.networks.cnn import CNNNetwork\nfrom sktime.regression.deep_learning.base import BaseDeepRegressor\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\n\n_check_dl_dependencies(severity=\"warning\")\n\n\nclass CNNRegressor(BaseDeepRegressor):\n \"\"\"Time Convolutional Neural Network (CNN), as described in [1].\n\n Parameters\n ----------\n should inherited fields be listed here?\n n_epochs : int, default = 2000\n the number of epochs to train the model\n batch_size : int, default = 16\n the number of samples per gradient update.\n kernel_size : int, default = 7\n the length of the 1D convolution window\n avg_pool_size : int, default = 3\n size of the average pooling windows\n n_conv_layers : int, default = 2\n the number of convolutional plus average pooling layers\n filter_sizes : array of shape (n_conv_layers) default = [6, 12]\n random_state : int or None, default=None\n Seed for random number generation.\n verbose : boolean, default = False\n whether to output extra information\n loss : string, default=\"mean_squared_error\"\n fit parameter for the keras model\n optimizer : keras.optimizer, default=keras.optimizers.Adam(),\n metrics : list of strings, default=[\"accuracy\"],\n\n Notes\n -----\n .. [1] Zhao et. al, Convolutional neural networks for\n time series classification, Journal of\n Systems Engineering and Electronics, 28(1):2017.\n\n Adapted from the implementation from Fawaz et. al\n https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/cnn.py\n \"\"\"\n\n def __init__(\n self,\n n_epochs=2000,\n batch_size=16,\n kernel_size=7,\n avg_pool_size=3,\n n_conv_layers=2,\n callbacks=None,\n verbose=False,\n loss=\"mean_squared_error\",\n metrics=None,\n random_seed=0,\n ):\n _check_dl_dependencies(severity=\"error\")\n super(CNNRegressor, self).__init__(\n batch_size=batch_size,\n )\n self.n_conv_layers = n_conv_layers\n self.avg_pool_size = avg_pool_size\n self.kernel_size = kernel_size\n self.callbacks = callbacks\n self.n_epochs = n_epochs\n self.batch_size = batch_size\n self.verbose = verbose\n self.loss = loss\n self.metrics = metrics\n self.random_seed = random_seed\n self._network = CNNNetwork()\n\n def build_model(self, input_shape, **kwargs):\n \"\"\"Construct a compiled, un-trained, keras model that is ready for training.\n\n In sktime, time series are stored in numpy arrays of shape (d,m), where d\n is the number of dimensions, m is the series length. Keras/tensorflow assume\n data is in shape (m,d). This method also assumes (m,d). 
Transpose should\n happen in fit.\n\n Parameters\n ----------\n input_shape : tuple\n The shape of the data fed into the input layer, should be (m,d)\n\n Returns\n -------\n output : a compiled Keras Model\n \"\"\"\n import tensorflow as tf\n from tensorflow import keras\n\n tf.random.set_seed(self.random_seed)\n\n if self.metrics is None:\n metrics = [\"accuracy\"]\n else:\n metrics = self.metrics\n\n input_layer, output_layer = self._network.build_network(input_shape, **kwargs)\n\n output_layer = keras.layers.Dense(units=1, activation=\"sigmoid\")(output_layer)\n\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n\n model.compile(\n loss=self.loss,\n optimizer=keras.optimizers.Adam(),\n metrics=metrics,\n )\n return model\n\n def _fit(self, X, y):\n \"\"\"Fit the classifier on the training set (X, y).\n\n Parameters\n ----------\n X : np.ndarray of shape = (n_instances (n), n_dimensions (d), series_length (m))\n The training input samples.\n y : np.ndarray of shape n\n The training data class labels.\n\n Returns\n -------\n self : object\n \"\"\"\n if self.callbacks is None:\n self._callbacks = []\n\n # Transpose to conform to Keras input style.\n X = X.transpose(0, 2, 1)\n\n self.input_shape = X.shape[1:]\n self.model_ = self.build_model(self.input_shape)\n if self.verbose:\n self.model.summary()\n\n self.history = self.model_.fit(\n X,\n y,\n batch_size=self.batch_size,\n epochs=self.n_epochs,\n verbose=self.verbose,\n callbacks=self._callbacks,\n )\n return self\n", "path": "sktime/regression/deep_learning/cnn.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Time Convolutional Neural Network (CNN) for regression.\"\"\"\n\n__author__ = [\"AurumnPegasus\", \"achieveordie\"]\n__all__ = [\"CNNRegressor\"]\n\nfrom sklearn.utils import check_random_state\n\nfrom sktime.networks.cnn import CNNNetwork\nfrom sktime.regression.deep_learning.base import BaseDeepRegressor\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\n\n_check_dl_dependencies(severity=\"warning\")\n\n\nclass CNNRegressor(BaseDeepRegressor):\n \"\"\"Time Convolutional Neural Network (CNN), as described in [1].\n\n Parameters\n ----------\n should inherited fields be listed here?\n n_epochs : int, default = 2000\n the number of epochs to train the model\n batch_size : int, default = 16\n the number of samples per gradient update.\n kernel_size : int, default = 7\n the length of the 1D convolution window\n avg_pool_size : int, default = 3\n size of the average pooling windows\n n_conv_layers : int, default = 2\n the number of convolutional plus average pooling layers\n filter_sizes : array of shape (n_conv_layers) default = [6, 12]\n random_state : int or None, default=None\n Seed for random number generation.\n verbose : boolean, default = False\n whether to output extra information\n loss : string, default=\"mean_squared_error\"\n fit parameter for the keras model\n activation : keras.activations or string, default `linear`\n function to use in the output layer.\n optimizer : keras.optimizers or string, default `None`.\n when `None`, internally uses `keras.optimizers.Adam(0.01)`\n use_bias : bool, default=True\n whether to use bias in the output layer.\n metrics : list of strings, default=[\"accuracy\"],\n\n Notes\n -----\n .. [1] Zhao et. al, Convolutional neural networks for\n time series classification, Journal of\n Systems Engineering and Electronics, 28(1):2017.\n\n Adapted from the implementation from Fawaz et. 
al\n https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/cnn.py\n \"\"\"\n\n def __init__(\n self,\n n_epochs=2000,\n batch_size=16,\n kernel_size=7,\n avg_pool_size=3,\n n_conv_layers=2,\n callbacks=None,\n verbose=False,\n loss=\"mean_squared_error\",\n metrics=None,\n random_state=0,\n activation=\"linear\",\n use_bias=True,\n optimizer=None,\n ):\n _check_dl_dependencies(severity=\"error\")\n super(CNNRegressor, self).__init__(\n batch_size=batch_size,\n )\n self.n_conv_layers = n_conv_layers\n self.avg_pool_size = avg_pool_size\n self.kernel_size = kernel_size\n self.callbacks = callbacks\n self.n_epochs = n_epochs\n self.batch_size = batch_size\n self.verbose = verbose\n self.loss = loss\n self.metrics = metrics\n self.random_state = random_state\n self.activation = activation\n self.use_bias = use_bias\n self.optimizer = optimizer\n self.history = None\n self._network = CNNNetwork()\n\n def build_model(self, input_shape, **kwargs):\n \"\"\"Construct a compiled, un-trained, keras model that is ready for training.\n\n In sktime, time series are stored in numpy arrays of shape (d,m), where d\n is the number of dimensions, m is the series length. Keras/tensorflow assume\n data is in shape (m,d). This method also assumes (m,d). Transpose should\n happen in fit.\n\n Parameters\n ----------\n input_shape : tuple\n The shape of the data fed into the input layer, should be (m,d)\n\n Returns\n -------\n output : a compiled Keras Model\n \"\"\"\n import tensorflow as tf\n from tensorflow import keras\n\n tf.random.set_seed(self.random_state)\n\n if self.metrics is None:\n metrics = [\"accuracy\"]\n else:\n metrics = self.metrics\n\n input_layer, output_layer = self._network.build_network(input_shape, **kwargs)\n\n output_layer = keras.layers.Dense(\n units=1,\n activation=self.activation,\n use_bias=self.use_bias,\n )(output_layer)\n\n self.optimizer_ = (\n keras.optimizers.Adam(learning_rate=0.01)\n if self.optimizer is None\n else self.optimizer\n )\n\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n\n model.compile(\n loss=self.loss,\n optimizer=self.optimizer_,\n metrics=metrics,\n )\n return model\n\n def _fit(self, X, y):\n \"\"\"Fit the classifier on the training set (X, y).\n\n Parameters\n ----------\n X : np.ndarray of shape = (n_instances (n), n_dimensions (d), series_length (m))\n The training input samples.\n y : np.ndarray of shape n\n The training data class labels.\n\n Returns\n -------\n self : object\n \"\"\"\n if self.callbacks is None:\n self._callbacks = []\n\n # Transpose to conform to Keras input style.\n X = X.transpose(0, 2, 1)\n\n check_random_state(self.random_state)\n self.input_shape = X.shape[1:]\n self.model_ = self.build_model(self.input_shape)\n if self.verbose:\n self.model.summary()\n\n self.history = self.model_.fit(\n X,\n y,\n batch_size=self.batch_size,\n epochs=self.n_epochs,\n verbose=self.verbose,\n callbacks=self._callbacks,\n )\n return self\n", "path": "sktime/regression/deep_learning/cnn.py"}]} | 1,807 | 828 |
gh_patches_debug_5953 | rasdani/github-patches | git_diff | dask__distributed-1331 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError / hang on LocalCluster startup
I'm loading my conda environment off NAS storage, and when usage is heavy, there can be long delays in loading things, which I believe can exceed the default timeout (five seconds) here:
https://github.com/dask/distributed/blob/master/distributed/client.py#L428
As a result I see this on a call to `LocalCluster()`:
```
Traceback (most recent call last):
  File "lib/python3.5/site-packages/tornado/gen.py", line 910, in error_callback
    future.result()
  File "lib/python3.5/site-packages/tornado/concurrent.py", line 238, in result
    raise_exc_info(self._exc_info)
  File "<string>", line 4, in raise_exc_info
  File "lib/python3.5/site-packages/tornado/gen.py", line 1063, in run
    yielded = self.gen.throw(*exc_info)
  File "lib/python3.5/site-packages/distributed/nanny.py", line 300, in start
    yield self._wait_until_running()
  File "lib/python3.5/site-packages/tornado/gen.py", line 1055, in run
    value = future.result()
  File "lib/python3.5/site-packages/tornado/concurrent.py", line 238, in result
    raise_exc_info(self._exc_info)
  File "<string>", line 4, in raise_exc_info
  File "lib/python3.5/site-packages/tornado/gen.py", line 1069, in run
    yielded = self.gen.send(value)
  File "lib/python3.5/site-packages/distributed/nanny.py", line 386, in _wait_until_running
    raise ValueError("Worker not started")
ValueError: Worker not started
```
This `ValueError` does not seem to be passed up to my application; my application seems frozen waiting for the cluster startup (not 100% sure where), i.e., it never returns from the `LocalCluster()` call. The multiprocessing pool does actually start up, but in this case too late to matter.
How can I catch / retry this situation?
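A minimal sketch of the retry pattern being asked about, assuming the startup failure is actually raised to the caller rather than hanging (the missing behaviour described above):

```python
from distributed import LocalCluster


def local_cluster_with_retries(attempts=3, **cluster_kwargs):
    # Retry cluster creation: a slow environment (e.g. an overloaded NAS)
    # can make the first worker start exceed the startup timeout.
    last_exc = None
    for _ in range(attempts):
        try:
            return LocalCluster(**cluster_kwargs)
        except ValueError as exc:  # e.g. "Worker not started"
            last_exc = exc
    raise last_exc
```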
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/cli/dask_worker.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 import atexit
4 from datetime import timedelta
5 from functools import partial
6 import json
7 import logging
8 import os
9 import shutil
10 import signal
11 from sys import exit
12 from time import sleep
13
14 import click
15 from distributed import Nanny, Worker, rpc
16 from distributed.utils import All, get_ip_interface
17 from distributed.worker import _ncores
18 from distributed.http import HTTPWorker
19 from distributed.metrics import time
20 from distributed.security import Security
21 from distributed.cli.utils import check_python_3, uri_from_host_port
22
23 from toolz import valmap
24 from tornado.ioloop import IOLoop, TimeoutError
25 from tornado import gen
26
27 logger = logging.getLogger('distributed.dask_worker')
28
29
30 pem_file_option_type = click.Path(exists=True, resolve_path=True)
31
32 @click.command()
33 @click.argument('scheduler', type=str, required=False)
34 @click.option('--tls-ca-file', type=pem_file_option_type, default=None,
35 help="CA cert(s) file for TLS (in PEM format)")
36 @click.option('--tls-cert', type=pem_file_option_type, default=None,
37 help="certificate file for TLS (in PEM format)")
38 @click.option('--tls-key', type=pem_file_option_type, default=None,
39 help="private key file for TLS (in PEM format)")
40 @click.option('--worker-port', type=int, default=0,
41 help="Serving computation port, defaults to random")
42 @click.option('--http-port', type=int, default=0,
43 help="Serving http port, defaults to random")
44 @click.option('--nanny-port', type=int, default=0,
45 help="Serving nanny port, defaults to random")
46 @click.option('--bokeh-port', type=int, default=8789,
47 help="Bokeh port, defaults to 8789")
48 @click.option('--bokeh/--no-bokeh', 'bokeh', default=True, show_default=True,
49 required=False, help="Launch Bokeh Web UI")
50 @click.option('--host', type=str, default=None,
51 help="Serving host. Should be an ip address that is"
52 " visible to the scheduler and other workers. "
53 "See --interface.")
54 @click.option('--interface', type=str, default=None,
55 help="Network interface like 'eth0' or 'ib0'")
56 @click.option('--nthreads', type=int, default=0,
57 help="Number of threads per process.")
58 @click.option('--nprocs', type=int, default=1,
59 help="Number of worker processes. Defaults to one.")
60 @click.option('--name', type=str, default='',
61 help="A unique name for this worker like 'worker-1'")
62 @click.option('--memory-limit', default='auto',
63 help="Number of bytes before spilling data to disk. "
64 "This can be an integer (nbytes) "
65 "float (fraction of total memory) "
66 "or 'auto'")
67 @click.option('--reconnect/--no-reconnect', default=True,
68 help="Reconnect to scheduler if disconnected")
69 @click.option('--nanny/--no-nanny', default=True,
70 help="Start workers in nanny process for management")
71 @click.option('--pid-file', type=str, default='',
72 help="File to write the process PID")
73 @click.option('--local-directory', default='', type=str,
74 help="Directory to place worker files")
75 @click.option('--resources', type=str, default='',
76 help='Resources for task constraints like "GPU=2 MEM=10e9"')
77 @click.option('--scheduler-file', type=str, default='',
78 help='Filename to JSON encoded scheduler information. '
79 'Use with dask-scheduler --scheduler-file')
80 @click.option('--death-timeout', type=float, default=None,
81 help="Seconds to wait for a scheduler before closing")
82 @click.option('--bokeh-prefix', type=str, default=None,
83 help="Prefix for the bokeh app")
84 @click.option('--preload', type=str, multiple=True,
85 help='Module that should be loaded by each worker process '
86 'like "foo.bar" or "/path/to/foo.py"')
87 def main(scheduler, host, worker_port, http_port, nanny_port, nthreads, nprocs,
88 nanny, name, memory_limit, pid_file, reconnect,
89 resources, bokeh, bokeh_port, local_directory, scheduler_file,
90 interface, death_timeout, preload, bokeh_prefix,
91 tls_ca_file, tls_cert, tls_key):
92 sec = Security(tls_ca_file=tls_ca_file,
93 tls_worker_cert=tls_cert,
94 tls_worker_key=tls_key,
95 )
96
97 if nanny:
98 port = nanny_port
99 else:
100 port = worker_port
101
102 if nprocs > 1 and worker_port != 0:
103 logger.error("Failed to launch worker. You cannot use the --port argument when nprocs > 1.")
104 exit(1)
105
106 if nprocs > 1 and name:
107 logger.error("Failed to launch worker. You cannot use the --name argument when nprocs > 1.")
108 exit(1)
109
110 if nprocs > 1 and not nanny:
111 logger.error("Failed to launch worker. You cannot use the --no-nanny argument when nprocs > 1.")
112 exit(1)
113
114 if not nthreads:
115 nthreads = _ncores // nprocs
116
117 if pid_file:
118 with open(pid_file, 'w') as f:
119 f.write(str(os.getpid()))
120
121 def del_pid_file():
122 if os.path.exists(pid_file):
123 os.remove(pid_file)
124 atexit.register(del_pid_file)
125
126 services = {('http', http_port): HTTPWorker}
127
128 if bokeh:
129 try:
130 from distributed.bokeh.worker import BokehWorker
131 except ImportError:
132 pass
133 else:
134 if bokeh_prefix:
135 result = (BokehWorker, {'prefix': bokeh_prefix})
136 else:
137 result = BokehWorker
138 services[('bokeh', bokeh_port)] = result
139
140 if resources:
141 resources = resources.replace(',', ' ').split()
142 resources = dict(pair.split('=') for pair in resources)
143 resources = valmap(float, resources)
144 else:
145 resources = None
146
147 loop = IOLoop.current()
148
149 if nanny:
150 kwargs = {'worker_port': worker_port}
151 t = Nanny
152 else:
153 kwargs = {}
154 if nanny_port:
155 kwargs['service_ports'] = {'nanny': nanny_port}
156 t = Worker
157
158 if scheduler_file:
159 while not os.path.exists(scheduler_file):
160 sleep(0.01)
161 for i in range(10):
162 try:
163 with open(scheduler_file) as f:
164 cfg = json.load(f)
165 scheduler = cfg['address']
166 break
167 except (ValueError, KeyError): # race with scheduler on file
168 sleep(0.01)
169
170 if not scheduler:
171 raise ValueError("Need to provide scheduler address like\n"
172 "dask-worker SCHEDULER_ADDRESS:8786")
173
174 if interface:
175 if host:
176 raise ValueError("Can not specify both interface and host")
177 else:
178 host = get_ip_interface(interface)
179
180 if host or port:
181 addr = uri_from_host_port(host, port, 0)
182 else:
183 # Choose appropriate address for scheduler
184 addr = None
185
186 nannies = [t(scheduler, ncores=nthreads,
187 services=services, name=name, loop=loop, resources=resources,
188 memory_limit=memory_limit, reconnect=reconnect,
189 local_dir=local_directory, death_timeout=death_timeout,
190 preload=preload, security=sec,
191 **kwargs)
192 for i in range(nprocs)]
193
194 @gen.coroutine
195 def close_all():
196 try:
197 if nanny:
198 yield [n._close(timeout=2) for n in nannies]
199 finally:
200 loop.stop()
201
202 def handle_signal(signum, frame):
203 logger.info("Exiting on signal %d", signum)
204 if loop._running:
205 loop.add_callback_from_signal(loop.stop)
206 else:
207 exit(0)
208
209 # NOTE: We can't use the generic install_signal_handlers() function from
210 # distributed.cli.utils because we're handling the signal differently.
211 signal.signal(signal.SIGINT, handle_signal)
212 signal.signal(signal.SIGTERM, handle_signal)
213
214 for n in nannies:
215 n.start(addr)
216
217 @gen.coroutine
218 def run():
219 while all(n.status != 'closed' for n in nannies):
220 yield gen.sleep(0.2)
221
222 try:
223 loop.run_sync(run)
224 except (KeyboardInterrupt, TimeoutError):
225 pass
226 finally:
227 logger.info("End worker")
228
229 # Clean exit: unregister all workers from scheduler
230 loop.run_sync(close_all)
231
232
233 def go():
234 check_python_3()
235 main()
236
237 if __name__ == '__main__':
238 go()
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/cli/dask_worker.py b/distributed/cli/dask_worker.py
--- a/distributed/cli/dask_worker.py
+++ b/distributed/cli/dask_worker.py
@@ -211,11 +211,9 @@
signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGTERM, handle_signal)
- for n in nannies:
- n.start(addr)
-
@gen.coroutine
def run():
+ yield [n.start(addr) for n in nannies]
while all(n.status != 'closed' for n in nannies):
yield gen.sleep(0.2)
| {"golden_diff": "diff --git a/distributed/cli/dask_worker.py b/distributed/cli/dask_worker.py\n--- a/distributed/cli/dask_worker.py\n+++ b/distributed/cli/dask_worker.py\n@@ -211,11 +211,9 @@\n signal.signal(signal.SIGINT, handle_signal)\n signal.signal(signal.SIGTERM, handle_signal)\n \n- for n in nannies:\n- n.start(addr)\n-\n @gen.coroutine\n def run():\n+ yield [n.start(addr) for n in nannies]\n while all(n.status != 'closed' for n in nannies):\n yield gen.sleep(0.2)\n", "issue": "ValueError / hang on LocalCluster startup\nI'm loading my conda environment off NAS storage, and when usage is heavy, there can be long delays in loading things, which I believe can exceed the default timeout (five seconds) here:\r\n\r\nhttps://github.com/dask/distributed/blob/master/distributed/client.py#L428\r\n\r\nAs a result I see this on a call to `LocalCluster()`:\r\n\r\n\tTraceback (most recent call last): \r\n\t File \"lib/python3.5/site-packages/tornado/gen.py\", line 910, in error_callback \r\n\t\tfuture.result() \r\n\t File \"lib/python3.5/site-packages/tornado/concurrent.py\", line 238, in result \r\n\t\traise_exc_info(self._exc_info) \r\n\t File \"<string>\", line 4, in raise_exc_info \r\n\t File \"lib/python3.5/site-packages/tornado/gen.py\", line 1063, in run \r\n\t\tyielded = self.gen.throw(*exc_info) \r\n\t File \"lib/python3.5/site-packages/distributed/nanny.py\", line 300, in start\r\n\t\tyield self._wait_until_running() \r\n\t File \"lib/python3.5/site-packages/tornado/gen.py\", line 1055, in run \r\n\t\tvalue = future.result() \r\n\t File \"lib/python3.5/site-packages/tornado/concurrent.py\", line 238, in result\r\n\t\traise_exc_info(self._exc_info) \r\n\t File \"<string>\", line 4, in raise_exc_info \r\n\t File \"lib/python3.5/site-packages/tornado/gen.py\", line 1069, in run\r\n\t\tyielded = self.gen.send(value) \r\n\t File \"lib/python3.5/site-packages/distributed/nanny.py\", line 386, in _wait_until_running\r\n\t\traise ValueError(\"Worker not started\") \r\n\tValueError: Worker not started \r\n\r\nThis `ValueError` does not seem to be passed up to my application; my application seems frozen waiting for the cluster start up (not 100% sure where) - ie, it never returns from the `LocalCluster()` call. 
Although the multiprocessing pool actually does start up, in this case too late to matter.\r\n\r\nHow can I catch / retry this situation?\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport atexit\nfrom datetime import timedelta\nfrom functools import partial\nimport json\nimport logging\nimport os\nimport shutil\nimport signal\nfrom sys import exit\nfrom time import sleep\n\nimport click\nfrom distributed import Nanny, Worker, rpc\nfrom distributed.utils import All, get_ip_interface\nfrom distributed.worker import _ncores\nfrom distributed.http import HTTPWorker\nfrom distributed.metrics import time\nfrom distributed.security import Security\nfrom distributed.cli.utils import check_python_3, uri_from_host_port\n\nfrom toolz import valmap\nfrom tornado.ioloop import IOLoop, TimeoutError\nfrom tornado import gen\n\nlogger = logging.getLogger('distributed.dask_worker')\n\n\npem_file_option_type = click.Path(exists=True, resolve_path=True)\n\[email protected]()\[email protected]('scheduler', type=str, required=False)\[email protected]('--tls-ca-file', type=pem_file_option_type, default=None,\n help=\"CA cert(s) file for TLS (in PEM format)\")\[email protected]('--tls-cert', type=pem_file_option_type, default=None,\n help=\"certificate file for TLS (in PEM format)\")\[email protected]('--tls-key', type=pem_file_option_type, default=None,\n help=\"private key file for TLS (in PEM format)\")\[email protected]('--worker-port', type=int, default=0,\n help=\"Serving computation port, defaults to random\")\[email protected]('--http-port', type=int, default=0,\n help=\"Serving http port, defaults to random\")\[email protected]('--nanny-port', type=int, default=0,\n help=\"Serving nanny port, defaults to random\")\[email protected]('--bokeh-port', type=int, default=8789,\n help=\"Bokeh port, defaults to 8789\")\[email protected]('--bokeh/--no-bokeh', 'bokeh', default=True, show_default=True,\n required=False, help=\"Launch Bokeh Web UI\")\[email protected]('--host', type=str, default=None,\n help=\"Serving host. Should be an ip address that is\"\n \" visible to the scheduler and other workers. \"\n \"See --interface.\")\[email protected]('--interface', type=str, default=None,\n help=\"Network interface like 'eth0' or 'ib0'\")\[email protected]('--nthreads', type=int, default=0,\n help=\"Number of threads per process.\")\[email protected]('--nprocs', type=int, default=1,\n help=\"Number of worker processes. Defaults to one.\")\[email protected]('--name', type=str, default='',\n help=\"A unique name for this worker like 'worker-1'\")\[email protected]('--memory-limit', default='auto',\n help=\"Number of bytes before spilling data to disk. \"\n \"This can be an integer (nbytes) \"\n \"float (fraction of total memory) \"\n \"or 'auto'\")\[email protected]('--reconnect/--no-reconnect', default=True,\n help=\"Reconnect to scheduler if disconnected\")\[email protected]('--nanny/--no-nanny', default=True,\n help=\"Start workers in nanny process for management\")\[email protected]('--pid-file', type=str, default='',\n help=\"File to write the process PID\")\[email protected]('--local-directory', default='', type=str,\n help=\"Directory to place worker files\")\[email protected]('--resources', type=str, default='',\n help='Resources for task constraints like \"GPU=2 MEM=10e9\"')\[email protected]('--scheduler-file', type=str, default='',\n help='Filename to JSON encoded scheduler information. 
'\n 'Use with dask-scheduler --scheduler-file')\[email protected]('--death-timeout', type=float, default=None,\n help=\"Seconds to wait for a scheduler before closing\")\[email protected]('--bokeh-prefix', type=str, default=None,\n help=\"Prefix for the bokeh app\")\[email protected]('--preload', type=str, multiple=True,\n help='Module that should be loaded by each worker process '\n 'like \"foo.bar\" or \"/path/to/foo.py\"')\ndef main(scheduler, host, worker_port, http_port, nanny_port, nthreads, nprocs,\n nanny, name, memory_limit, pid_file, reconnect,\n resources, bokeh, bokeh_port, local_directory, scheduler_file,\n interface, death_timeout, preload, bokeh_prefix,\n tls_ca_file, tls_cert, tls_key):\n sec = Security(tls_ca_file=tls_ca_file,\n tls_worker_cert=tls_cert,\n tls_worker_key=tls_key,\n )\n\n if nanny:\n port = nanny_port\n else:\n port = worker_port\n\n if nprocs > 1 and worker_port != 0:\n logger.error(\"Failed to launch worker. You cannot use the --port argument when nprocs > 1.\")\n exit(1)\n\n if nprocs > 1 and name:\n logger.error(\"Failed to launch worker. You cannot use the --name argument when nprocs > 1.\")\n exit(1)\n\n if nprocs > 1 and not nanny:\n logger.error(\"Failed to launch worker. You cannot use the --no-nanny argument when nprocs > 1.\")\n exit(1)\n\n if not nthreads:\n nthreads = _ncores // nprocs\n\n if pid_file:\n with open(pid_file, 'w') as f:\n f.write(str(os.getpid()))\n\n def del_pid_file():\n if os.path.exists(pid_file):\n os.remove(pid_file)\n atexit.register(del_pid_file)\n\n services = {('http', http_port): HTTPWorker}\n\n if bokeh:\n try:\n from distributed.bokeh.worker import BokehWorker\n except ImportError:\n pass\n else:\n if bokeh_prefix:\n result = (BokehWorker, {'prefix': bokeh_prefix})\n else:\n result = BokehWorker\n services[('bokeh', bokeh_port)] = result\n\n if resources:\n resources = resources.replace(',', ' ').split()\n resources = dict(pair.split('=') for pair in resources)\n resources = valmap(float, resources)\n else:\n resources = None\n\n loop = IOLoop.current()\n\n if nanny:\n kwargs = {'worker_port': worker_port}\n t = Nanny\n else:\n kwargs = {}\n if nanny_port:\n kwargs['service_ports'] = {'nanny': nanny_port}\n t = Worker\n\n if scheduler_file:\n while not os.path.exists(scheduler_file):\n sleep(0.01)\n for i in range(10):\n try:\n with open(scheduler_file) as f:\n cfg = json.load(f)\n scheduler = cfg['address']\n break\n except (ValueError, KeyError): # race with scheduler on file\n sleep(0.01)\n\n if not scheduler:\n raise ValueError(\"Need to provide scheduler address like\\n\"\n \"dask-worker SCHEDULER_ADDRESS:8786\")\n\n if interface:\n if host:\n raise ValueError(\"Can not specify both interface and host\")\n else:\n host = get_ip_interface(interface)\n\n if host or port:\n addr = uri_from_host_port(host, port, 0)\n else:\n # Choose appropriate address for scheduler\n addr = None\n\n nannies = [t(scheduler, ncores=nthreads,\n services=services, name=name, loop=loop, resources=resources,\n memory_limit=memory_limit, reconnect=reconnect,\n local_dir=local_directory, death_timeout=death_timeout,\n preload=preload, security=sec,\n **kwargs)\n for i in range(nprocs)]\n\n @gen.coroutine\n def close_all():\n try:\n if nanny:\n yield [n._close(timeout=2) for n in nannies]\n finally:\n loop.stop()\n\n def handle_signal(signum, frame):\n logger.info(\"Exiting on signal %d\", signum)\n if loop._running:\n loop.add_callback_from_signal(loop.stop)\n else:\n exit(0)\n\n # NOTE: We can't use the generic install_signal_handlers() 
function from\n # distributed.cli.utils because we're handling the signal differently.\n signal.signal(signal.SIGINT, handle_signal)\n signal.signal(signal.SIGTERM, handle_signal)\n\n for n in nannies:\n n.start(addr)\n\n @gen.coroutine\n def run():\n while all(n.status != 'closed' for n in nannies):\n yield gen.sleep(0.2)\n\n try:\n loop.run_sync(run)\n except (KeyboardInterrupt, TimeoutError):\n pass\n finally:\n logger.info(\"End worker\")\n\n # Clean exit: unregister all workers from scheduler\n loop.run_sync(close_all)\n\n\ndef go():\n check_python_3()\n main()\n\nif __name__ == '__main__':\n go()\n", "path": "distributed/cli/dask_worker.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport atexit\nfrom datetime import timedelta\nfrom functools import partial\nimport json\nimport logging\nimport os\nimport shutil\nimport signal\nfrom sys import exit\nfrom time import sleep\n\nimport click\nfrom distributed import Nanny, Worker, rpc\nfrom distributed.utils import All, get_ip_interface\nfrom distributed.worker import _ncores\nfrom distributed.http import HTTPWorker\nfrom distributed.metrics import time\nfrom distributed.security import Security\nfrom distributed.cli.utils import check_python_3, uri_from_host_port\n\nfrom toolz import valmap\nfrom tornado.ioloop import IOLoop, TimeoutError\nfrom tornado import gen\n\nlogger = logging.getLogger('distributed.dask_worker')\n\n\npem_file_option_type = click.Path(exists=True, resolve_path=True)\n\[email protected]()\[email protected]('scheduler', type=str, required=False)\[email protected]('--tls-ca-file', type=pem_file_option_type, default=None,\n help=\"CA cert(s) file for TLS (in PEM format)\")\[email protected]('--tls-cert', type=pem_file_option_type, default=None,\n help=\"certificate file for TLS (in PEM format)\")\[email protected]('--tls-key', type=pem_file_option_type, default=None,\n help=\"private key file for TLS (in PEM format)\")\[email protected]('--worker-port', type=int, default=0,\n help=\"Serving computation port, defaults to random\")\[email protected]('--http-port', type=int, default=0,\n help=\"Serving http port, defaults to random\")\[email protected]('--nanny-port', type=int, default=0,\n help=\"Serving nanny port, defaults to random\")\[email protected]('--bokeh-port', type=int, default=8789,\n help=\"Bokeh port, defaults to 8789\")\[email protected]('--bokeh/--no-bokeh', 'bokeh', default=True, show_default=True,\n required=False, help=\"Launch Bokeh Web UI\")\[email protected]('--host', type=str, default=None,\n help=\"Serving host. Should be an ip address that is\"\n \" visible to the scheduler and other workers. \"\n \"See --interface.\")\[email protected]('--interface', type=str, default=None,\n help=\"Network interface like 'eth0' or 'ib0'\")\[email protected]('--nthreads', type=int, default=0,\n help=\"Number of threads per process.\")\[email protected]('--nprocs', type=int, default=1,\n help=\"Number of worker processes. Defaults to one.\")\[email protected]('--name', type=str, default='',\n help=\"A unique name for this worker like 'worker-1'\")\[email protected]('--memory-limit', default='auto',\n help=\"Number of bytes before spilling data to disk. 
\"\n \"This can be an integer (nbytes) \"\n \"float (fraction of total memory) \"\n \"or 'auto'\")\[email protected]('--reconnect/--no-reconnect', default=True,\n help=\"Reconnect to scheduler if disconnected\")\[email protected]('--nanny/--no-nanny', default=True,\n help=\"Start workers in nanny process for management\")\[email protected]('--pid-file', type=str, default='',\n help=\"File to write the process PID\")\[email protected]('--local-directory', default='', type=str,\n help=\"Directory to place worker files\")\[email protected]('--resources', type=str, default='',\n help='Resources for task constraints like \"GPU=2 MEM=10e9\"')\[email protected]('--scheduler-file', type=str, default='',\n help='Filename to JSON encoded scheduler information. '\n 'Use with dask-scheduler --scheduler-file')\[email protected]('--death-timeout', type=float, default=None,\n help=\"Seconds to wait for a scheduler before closing\")\[email protected]('--bokeh-prefix', type=str, default=None,\n help=\"Prefix for the bokeh app\")\[email protected]('--preload', type=str, multiple=True,\n help='Module that should be loaded by each worker process '\n 'like \"foo.bar\" or \"/path/to/foo.py\"')\ndef main(scheduler, host, worker_port, http_port, nanny_port, nthreads, nprocs,\n nanny, name, memory_limit, pid_file, reconnect,\n resources, bokeh, bokeh_port, local_directory, scheduler_file,\n interface, death_timeout, preload, bokeh_prefix,\n tls_ca_file, tls_cert, tls_key):\n sec = Security(tls_ca_file=tls_ca_file,\n tls_worker_cert=tls_cert,\n tls_worker_key=tls_key,\n )\n\n if nanny:\n port = nanny_port\n else:\n port = worker_port\n\n if nprocs > 1 and worker_port != 0:\n logger.error(\"Failed to launch worker. You cannot use the --port argument when nprocs > 1.\")\n exit(1)\n\n if nprocs > 1 and name:\n logger.error(\"Failed to launch worker. You cannot use the --name argument when nprocs > 1.\")\n exit(1)\n\n if nprocs > 1 and not nanny:\n logger.error(\"Failed to launch worker. 
You cannot use the --no-nanny argument when nprocs > 1.\")\n exit(1)\n\n if not nthreads:\n nthreads = _ncores // nprocs\n\n if pid_file:\n with open(pid_file, 'w') as f:\n f.write(str(os.getpid()))\n\n def del_pid_file():\n if os.path.exists(pid_file):\n os.remove(pid_file)\n atexit.register(del_pid_file)\n\n services = {('http', http_port): HTTPWorker}\n\n if bokeh:\n try:\n from distributed.bokeh.worker import BokehWorker\n except ImportError:\n pass\n else:\n if bokeh_prefix:\n result = (BokehWorker, {'prefix': bokeh_prefix})\n else:\n result = BokehWorker\n services[('bokeh', bokeh_port)] = result\n\n if resources:\n resources = resources.replace(',', ' ').split()\n resources = dict(pair.split('=') for pair in resources)\n resources = valmap(float, resources)\n else:\n resources = None\n\n loop = IOLoop.current()\n\n if nanny:\n kwargs = {'worker_port': worker_port}\n t = Nanny\n else:\n kwargs = {}\n if nanny_port:\n kwargs['service_ports'] = {'nanny': nanny_port}\n t = Worker\n\n if scheduler_file:\n while not os.path.exists(scheduler_file):\n sleep(0.01)\n for i in range(10):\n try:\n with open(scheduler_file) as f:\n cfg = json.load(f)\n scheduler = cfg['address']\n break\n except (ValueError, KeyError): # race with scheduler on file\n sleep(0.01)\n\n if not scheduler:\n raise ValueError(\"Need to provide scheduler address like\\n\"\n \"dask-worker SCHEDULER_ADDRESS:8786\")\n\n if interface:\n if host:\n raise ValueError(\"Can not specify both interface and host\")\n else:\n host = get_ip_interface(interface)\n\n if host or port:\n addr = uri_from_host_port(host, port, 0)\n else:\n # Choose appropriate address for scheduler\n addr = None\n\n nannies = [t(scheduler, ncores=nthreads,\n services=services, name=name, loop=loop, resources=resources,\n memory_limit=memory_limit, reconnect=reconnect,\n local_dir=local_directory, death_timeout=death_timeout,\n preload=preload, security=sec,\n **kwargs)\n for i in range(nprocs)]\n\n @gen.coroutine\n def close_all():\n try:\n if nanny:\n yield [n._close(timeout=2) for n in nannies]\n finally:\n loop.stop()\n\n def handle_signal(signum, frame):\n logger.info(\"Exiting on signal %d\", signum)\n if loop._running:\n loop.add_callback_from_signal(loop.stop)\n else:\n exit(0)\n\n # NOTE: We can't use the generic install_signal_handlers() function from\n # distributed.cli.utils because we're handling the signal differently.\n signal.signal(signal.SIGINT, handle_signal)\n signal.signal(signal.SIGTERM, handle_signal)\n\n @gen.coroutine\n def run():\n yield [n.start(addr) for n in nannies]\n while all(n.status != 'closed' for n in nannies):\n yield gen.sleep(0.2)\n\n try:\n loop.run_sync(run)\n except (KeyboardInterrupt, TimeoutError):\n pass\n finally:\n logger.info(\"End worker\")\n\n # Clean exit: unregister all workers from scheduler\n loop.run_sync(close_all)\n\n\ndef go():\n check_python_3()\n main()\n\nif __name__ == '__main__':\n go()\n", "path": "distributed/cli/dask_worker.py"}]} | 3,300 | 139 |
gh_patches_debug_38550 | rasdani/github-patches | git_diff | ethereum__web3.py-2917 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`web3.providers.ipc.get_default_ipc_path()` returned `None` instead of `str`
* Version: 6.0.0
* Python: 3.10
* OS: macky whacky
* `pip freeze` output
```
will provide if needed
```
### What was wrong?
The method `get_default_ipc_path()` located in `web3.providers.ipc` returned `None` even though its return type is documented as `str`.
I am not sure why it returned `None` in my case; I probably forgot some flag in `geth`.
Edit: it returned `None` in my case because I was checking it before starting geth, so the `geth.ipc` file didn't exist yet.
### How can it be fixed?
Either change the return type to `Optional[str]`, or raise an error instead of returning `None`?
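Until then, a caller-side guard is a reasonable stopgap; this is only a sketch, assuming nothing beyond the public helpers from `web3.providers.ipc` shown in the file below:

```python
from pathlib import Path

from web3 import Web3
from web3.providers.ipc import IPCProvider, get_default_ipc_path

ipc_path = get_default_ipc_path()  # may be None until geth has created geth.ipc
if ipc_path is None or not Path(ipc_path).exists():
    raise FileNotFoundError("geth.ipc not found; start geth before connecting")

w3 = Web3(IPCProvider(ipc_path))
```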
---
**Note:** We prefer to use issues to track our work. If you think you've encountered a bug in web3py or
have a feature request, you're in the right place. If you have implementation or usage questions,
please refer to our [documentation](https://web3py.readthedocs.io/en/latest/) and/or join the conversation
on [discord](https://discord.gg/GHryRvPB84).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/providers/ipc.py`
Content:
```
1 from json import (
2 JSONDecodeError,
3 )
4 import logging
5 import os
6 from pathlib import (
7 Path,
8 )
9 import socket
10 import sys
11 import threading
12 from types import (
13 TracebackType,
14 )
15 from typing import (
16 Any,
17 Type,
18 Union,
19 )
20
21 from web3._utils.threads import (
22 Timeout,
23 )
24 from web3.types import (
25 RPCEndpoint,
26 RPCResponse,
27 )
28
29 from .base import (
30 JSONBaseProvider,
31 )
32
33
34 def get_ipc_socket(ipc_path: str, timeout: float = 2.0) -> socket.socket:
35 if sys.platform == "win32":
36 # On Windows named pipe is used. Simulate socket with it.
37 from web3._utils.windows import (
38 NamedPipe,
39 )
40
41 return NamedPipe(ipc_path)
42 else:
43 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
44 sock.connect(ipc_path)
45 sock.settimeout(timeout)
46 return sock
47
48
49 class PersistantSocket:
50 sock = None
51
52 def __init__(self, ipc_path: str) -> None:
53 self.ipc_path = ipc_path
54
55 def __enter__(self) -> socket.socket:
56 if not self.ipc_path:
57 raise FileNotFoundError(
58 f"cannot connect to IPC socket at path: {self.ipc_path!r}"
59 )
60
61 if not self.sock:
62 self.sock = self._open()
63 return self.sock
64
65 def __exit__(
66 self,
67 exc_type: Type[BaseException],
68 exc_value: BaseException,
69 traceback: TracebackType,
70 ) -> None:
71 # only close the socket if there was an error
72 if exc_value is not None:
73 try:
74 self.sock.close()
75 except Exception:
76 pass
77 self.sock = None
78
79 def _open(self) -> socket.socket:
80 return get_ipc_socket(self.ipc_path)
81
82 def reset(self) -> socket.socket:
83 self.sock.close()
84 self.sock = self._open()
85 return self.sock
86
87
88 # type ignored b/c missing return statement is by design here
89 def get_default_ipc_path() -> str: # type: ignore
90 if sys.platform == "darwin":
91 ipc_path = os.path.expanduser(
92 os.path.join("~", "Library", "Ethereum", "geth.ipc")
93 )
94 if os.path.exists(ipc_path):
95 return ipc_path
96
97 elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
98 ipc_path = os.path.expanduser(os.path.join("~", ".ethereum", "geth.ipc"))
99 if os.path.exists(ipc_path):
100 return ipc_path
101
102 elif sys.platform == "win32":
103 ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc")
104 if os.path.exists(ipc_path):
105 return ipc_path
106
107 else:
108 raise ValueError(
109 f"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/"
110 "freebsd are supported. You must specify the ipc_path"
111 )
112
113
114 # type ignored b/c missing return statement is by design here
115 def get_dev_ipc_path() -> str: # type: ignore
116 if os.environ.get("WEB3_PROVIDER_URI", ""):
117 ipc_path = os.environ.get("WEB3_PROVIDER_URI")
118 if os.path.exists(ipc_path):
119 return ipc_path
120 elif sys.platform == "darwin":
121 tmpdir = os.environ.get("TMPDIR", "")
122 ipc_path = os.path.expanduser(os.path.join(tmpdir, "geth.ipc"))
123 if os.path.exists(ipc_path):
124 return ipc_path
125
126 elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
127 ipc_path = os.path.expanduser(os.path.join("/tmp", "geth.ipc"))
128 if os.path.exists(ipc_path):
129 return ipc_path
130
131 elif sys.platform == "win32":
132 ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc")
133 if os.path.exists(ipc_path):
134 return ipc_path
135
136 ipc_path = os.path.join("\\\\", ".", "pipe", "jsonrpc.ipc")
137 if os.path.exists(ipc_path):
138 return ipc_path
139
140 else:
141 raise ValueError(
142 f"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/"
143 "freebsd are supported. You must specify the ipc_path"
144 )
145
146
147 class IPCProvider(JSONBaseProvider):
148 logger = logging.getLogger("web3.providers.IPCProvider")
149 _socket = None
150
151 def __init__(
152 self,
153 ipc_path: Union[str, Path] = None,
154 timeout: int = 10,
155 *args: Any,
156 **kwargs: Any,
157 ) -> None:
158 if ipc_path is None:
159 self.ipc_path = get_default_ipc_path()
160 elif isinstance(ipc_path, str) or isinstance(ipc_path, Path):
161 self.ipc_path = str(Path(ipc_path).expanduser().resolve())
162 else:
163 raise TypeError("ipc_path must be of type string or pathlib.Path")
164
165 self.timeout = timeout
166 self._lock = threading.Lock()
167 self._socket = PersistantSocket(self.ipc_path)
168 super().__init__()
169
170 def __str__(self) -> str:
171 return f"<{self.__class__.__name__} {self.ipc_path}>"
172
173 def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
174 self.logger.debug(
175 f"Making request IPC. Path: {self.ipc_path}, Method: {method}"
176 )
177 request = self.encode_rpc_request(method, params)
178
179 with self._lock, self._socket as sock:
180 try:
181 sock.sendall(request)
182 except BrokenPipeError:
183 # one extra attempt, then give up
184 sock = self._socket.reset()
185 sock.sendall(request)
186
187 raw_response = b""
188 with Timeout(self.timeout) as timeout:
189 while True:
190 try:
191 raw_response += sock.recv(4096)
192 except socket.timeout:
193 timeout.sleep(0)
194 continue
195 if raw_response == b"":
196 timeout.sleep(0)
197 elif has_valid_json_rpc_ending(raw_response):
198 try:
199 response = self.decode_rpc_response(raw_response)
200 except JSONDecodeError:
201 timeout.sleep(0)
202 continue
203 else:
204 return response
205 else:
206 timeout.sleep(0)
207 continue
208
209
210 # A valid JSON RPC response can only end in } or ] http://www.jsonrpc.org/specification
211 def has_valid_json_rpc_ending(raw_response: bytes) -> bool:
212 stripped_raw_response = raw_response.rstrip()
213 for valid_ending in [b"}", b"]"]:
214 if stripped_raw_response.endswith(valid_ending):
215 return True
216 else:
217 return False
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/web3/providers/ipc.py b/web3/providers/ipc.py
--- a/web3/providers/ipc.py
+++ b/web3/providers/ipc.py
@@ -14,6 +14,7 @@
)
from typing import (
Any,
+ Optional,
Type,
Union,
)
@@ -85,24 +86,26 @@
return self.sock
-# type ignored b/c missing return statement is by design here
-def get_default_ipc_path() -> str: # type: ignore
+def get_default_ipc_path() -> Optional[str]:
if sys.platform == "darwin":
ipc_path = os.path.expanduser(
os.path.join("~", "Library", "Ethereum", "geth.ipc")
)
if os.path.exists(ipc_path):
return ipc_path
+ return None
elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
ipc_path = os.path.expanduser(os.path.join("~", ".ethereum", "geth.ipc"))
if os.path.exists(ipc_path):
return ipc_path
+ return None
elif sys.platform == "win32":
ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc")
if os.path.exists(ipc_path):
return ipc_path
+ return None
else:
raise ValueError(
@@ -111,22 +114,25 @@
)
-# type ignored b/c missing return statement is by design here
-def get_dev_ipc_path() -> str: # type: ignore
+def get_dev_ipc_path() -> Optional[str]:
if os.environ.get("WEB3_PROVIDER_URI", ""):
ipc_path = os.environ.get("WEB3_PROVIDER_URI")
if os.path.exists(ipc_path):
return ipc_path
+ return None
+
elif sys.platform == "darwin":
tmpdir = os.environ.get("TMPDIR", "")
ipc_path = os.path.expanduser(os.path.join(tmpdir, "geth.ipc"))
if os.path.exists(ipc_path):
return ipc_path
+ return None
elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
ipc_path = os.path.expanduser(os.path.join("/tmp", "geth.ipc"))
if os.path.exists(ipc_path):
return ipc_path
+ return None
elif sys.platform == "win32":
ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc")
| {"golden_diff": "diff --git a/web3/providers/ipc.py b/web3/providers/ipc.py\n--- a/web3/providers/ipc.py\n+++ b/web3/providers/ipc.py\n@@ -14,6 +14,7 @@\n )\n from typing import (\n Any,\n+ Optional,\n Type,\n Union,\n )\n@@ -85,24 +86,26 @@\n return self.sock\n \n \n-# type ignored b/c missing return statement is by design here\n-def get_default_ipc_path() -> str: # type: ignore\n+def get_default_ipc_path() -> Optional[str]:\n if sys.platform == \"darwin\":\n ipc_path = os.path.expanduser(\n os.path.join(\"~\", \"Library\", \"Ethereum\", \"geth.ipc\")\n )\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n \n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"~\", \".ethereum\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n \n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n \n else:\n raise ValueError(\n@@ -111,22 +114,25 @@\n )\n \n \n-# type ignored b/c missing return statement is by design here\n-def get_dev_ipc_path() -> str: # type: ignore\n+def get_dev_ipc_path() -> Optional[str]:\n if os.environ.get(\"WEB3_PROVIDER_URI\", \"\"):\n ipc_path = os.environ.get(\"WEB3_PROVIDER_URI\")\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n+\n elif sys.platform == \"darwin\":\n tmpdir = os.environ.get(\"TMPDIR\", \"\")\n ipc_path = os.path.expanduser(os.path.join(tmpdir, \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n \n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"/tmp\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n+ return None\n \n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n", "issue": "`web3.providers.ipc.get_default_ipc_path()` returned `None` instead of `str`\n* Version: 6.0.0\r\n* Python: 3.10\r\n* OS: macky whacky\r\n* `pip freeze` output\r\n\r\n```\r\nwill provide if needed\r\n```\r\n\r\n\r\n### What was wrong?\r\n\r\nThe method `get_deafult_ipc_path()` located in `web3.providers.ipc` returned `None` even though its return type is documented as `str`.\r\n\r\nI am not sure why it returned `None` in my case, probably forgot some flag in `geth`.\r\n\r\nEdit: it returned None in my case because i was checking it before starting geth, so it didnt exist yet.\r\n\r\n### How can it be fixed?\r\n\r\nEither change type to `Optional[str]` or raise an error instead of returning `None` ?\r\n\r\n---\r\n**Note:** We prefer to use issues to track our work. If you think you've encountered a bug in web3py or \r\nhave a feature request, you're in the right place. 
If you have implementation or usage questions, \r\nplease refer to our [documentation](https://web3py.readthedocs.io/en/latest/) and/or join the conversation \r\non [discord](https://discord.gg/GHryRvPB84).\r\n\n", "before_files": [{"content": "from json import (\n JSONDecodeError,\n)\nimport logging\nimport os\nfrom pathlib import (\n Path,\n)\nimport socket\nimport sys\nimport threading\nfrom types import (\n TracebackType,\n)\nfrom typing import (\n Any,\n Type,\n Union,\n)\n\nfrom web3._utils.threads import (\n Timeout,\n)\nfrom web3.types import (\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom .base import (\n JSONBaseProvider,\n)\n\n\ndef get_ipc_socket(ipc_path: str, timeout: float = 2.0) -> socket.socket:\n if sys.platform == \"win32\":\n # On Windows named pipe is used. Simulate socket with it.\n from web3._utils.windows import (\n NamedPipe,\n )\n\n return NamedPipe(ipc_path)\n else:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(ipc_path)\n sock.settimeout(timeout)\n return sock\n\n\nclass PersistantSocket:\n sock = None\n\n def __init__(self, ipc_path: str) -> None:\n self.ipc_path = ipc_path\n\n def __enter__(self) -> socket.socket:\n if not self.ipc_path:\n raise FileNotFoundError(\n f\"cannot connect to IPC socket at path: {self.ipc_path!r}\"\n )\n\n if not self.sock:\n self.sock = self._open()\n return self.sock\n\n def __exit__(\n self,\n exc_type: Type[BaseException],\n exc_value: BaseException,\n traceback: TracebackType,\n ) -> None:\n # only close the socket if there was an error\n if exc_value is not None:\n try:\n self.sock.close()\n except Exception:\n pass\n self.sock = None\n\n def _open(self) -> socket.socket:\n return get_ipc_socket(self.ipc_path)\n\n def reset(self) -> socket.socket:\n self.sock.close()\n self.sock = self._open()\n return self.sock\n\n\n# type ignored b/c missing return statement is by design here\ndef get_default_ipc_path() -> str: # type: ignore\n if sys.platform == \"darwin\":\n ipc_path = os.path.expanduser(\n os.path.join(\"~\", \"Library\", \"Ethereum\", \"geth.ipc\")\n )\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"~\", \".ethereum\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n\n else:\n raise ValueError(\n f\"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/\"\n \"freebsd are supported. 
You must specify the ipc_path\"\n )\n\n\n# type ignored b/c missing return statement is by design here\ndef get_dev_ipc_path() -> str: # type: ignore\n if os.environ.get(\"WEB3_PROVIDER_URI\", \"\"):\n ipc_path = os.environ.get(\"WEB3_PROVIDER_URI\")\n if os.path.exists(ipc_path):\n return ipc_path\n elif sys.platform == \"darwin\":\n tmpdir = os.environ.get(\"TMPDIR\", \"\")\n ipc_path = os.path.expanduser(os.path.join(tmpdir, \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"/tmp\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"jsonrpc.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n\n else:\n raise ValueError(\n f\"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/\"\n \"freebsd are supported. You must specify the ipc_path\"\n )\n\n\nclass IPCProvider(JSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.IPCProvider\")\n _socket = None\n\n def __init__(\n self,\n ipc_path: Union[str, Path] = None,\n timeout: int = 10,\n *args: Any,\n **kwargs: Any,\n ) -> None:\n if ipc_path is None:\n self.ipc_path = get_default_ipc_path()\n elif isinstance(ipc_path, str) or isinstance(ipc_path, Path):\n self.ipc_path = str(Path(ipc_path).expanduser().resolve())\n else:\n raise TypeError(\"ipc_path must be of type string or pathlib.Path\")\n\n self.timeout = timeout\n self._lock = threading.Lock()\n self._socket = PersistantSocket(self.ipc_path)\n super().__init__()\n\n def __str__(self) -> str:\n return f\"<{self.__class__.__name__} {self.ipc_path}>\"\n\n def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\n f\"Making request IPC. 
Path: {self.ipc_path}, Method: {method}\"\n )\n request = self.encode_rpc_request(method, params)\n\n with self._lock, self._socket as sock:\n try:\n sock.sendall(request)\n except BrokenPipeError:\n # one extra attempt, then give up\n sock = self._socket.reset()\n sock.sendall(request)\n\n raw_response = b\"\"\n with Timeout(self.timeout) as timeout:\n while True:\n try:\n raw_response += sock.recv(4096)\n except socket.timeout:\n timeout.sleep(0)\n continue\n if raw_response == b\"\":\n timeout.sleep(0)\n elif has_valid_json_rpc_ending(raw_response):\n try:\n response = self.decode_rpc_response(raw_response)\n except JSONDecodeError:\n timeout.sleep(0)\n continue\n else:\n return response\n else:\n timeout.sleep(0)\n continue\n\n\n# A valid JSON RPC response can only end in } or ] http://www.jsonrpc.org/specification\ndef has_valid_json_rpc_ending(raw_response: bytes) -> bool:\n stripped_raw_response = raw_response.rstrip()\n for valid_ending in [b\"}\", b\"]\"]:\n if stripped_raw_response.endswith(valid_ending):\n return True\n else:\n return False\n", "path": "web3/providers/ipc.py"}], "after_files": [{"content": "from json import (\n JSONDecodeError,\n)\nimport logging\nimport os\nfrom pathlib import (\n Path,\n)\nimport socket\nimport sys\nimport threading\nfrom types import (\n TracebackType,\n)\nfrom typing import (\n Any,\n Optional,\n Type,\n Union,\n)\n\nfrom web3._utils.threads import (\n Timeout,\n)\nfrom web3.types import (\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom .base import (\n JSONBaseProvider,\n)\n\n\ndef get_ipc_socket(ipc_path: str, timeout: float = 2.0) -> socket.socket:\n if sys.platform == \"win32\":\n # On Windows named pipe is used. Simulate socket with it.\n from web3._utils.windows import (\n NamedPipe,\n )\n\n return NamedPipe(ipc_path)\n else:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(ipc_path)\n sock.settimeout(timeout)\n return sock\n\n\nclass PersistantSocket:\n sock = None\n\n def __init__(self, ipc_path: str) -> None:\n self.ipc_path = ipc_path\n\n def __enter__(self) -> socket.socket:\n if not self.ipc_path:\n raise FileNotFoundError(\n f\"cannot connect to IPC socket at path: {self.ipc_path!r}\"\n )\n\n if not self.sock:\n self.sock = self._open()\n return self.sock\n\n def __exit__(\n self,\n exc_type: Type[BaseException],\n exc_value: BaseException,\n traceback: TracebackType,\n ) -> None:\n # only close the socket if there was an error\n if exc_value is not None:\n try:\n self.sock.close()\n except Exception:\n pass\n self.sock = None\n\n def _open(self) -> socket.socket:\n return get_ipc_socket(self.ipc_path)\n\n def reset(self) -> socket.socket:\n self.sock.close()\n self.sock = self._open()\n return self.sock\n\n\ndef get_default_ipc_path() -> Optional[str]:\n if sys.platform == \"darwin\":\n ipc_path = os.path.expanduser(\n os.path.join(\"~\", \"Library\", \"Ethereum\", \"geth.ipc\")\n )\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"~\", \".ethereum\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n else:\n raise ValueError(\n f\"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/\"\n \"freebsd are supported. 
You must specify the ipc_path\"\n )\n\n\ndef get_dev_ipc_path() -> Optional[str]:\n if os.environ.get(\"WEB3_PROVIDER_URI\", \"\"):\n ipc_path = os.environ.get(\"WEB3_PROVIDER_URI\")\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform == \"darwin\":\n tmpdir = os.environ.get(\"TMPDIR\", \"\")\n ipc_path = os.path.expanduser(os.path.join(tmpdir, \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"/tmp\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"jsonrpc.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n\n else:\n raise ValueError(\n f\"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/\"\n \"freebsd are supported. You must specify the ipc_path\"\n )\n\n\nclass IPCProvider(JSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.IPCProvider\")\n _socket = None\n\n def __init__(\n self,\n ipc_path: Union[str, Path] = None,\n timeout: int = 10,\n *args: Any,\n **kwargs: Any,\n ) -> None:\n if ipc_path is None:\n self.ipc_path = get_default_ipc_path()\n elif isinstance(ipc_path, str) or isinstance(ipc_path, Path):\n self.ipc_path = str(Path(ipc_path).expanduser().resolve())\n else:\n raise TypeError(\"ipc_path must be of type string or pathlib.Path\")\n\n self.timeout = timeout\n self._lock = threading.Lock()\n self._socket = PersistantSocket(self.ipc_path)\n super().__init__()\n\n def __str__(self) -> str:\n return f\"<{self.__class__.__name__} {self.ipc_path}>\"\n\n def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\n f\"Making request IPC. Path: {self.ipc_path}, Method: {method}\"\n )\n request = self.encode_rpc_request(method, params)\n\n with self._lock, self._socket as sock:\n try:\n sock.sendall(request)\n except BrokenPipeError:\n # one extra attempt, then give up\n sock = self._socket.reset()\n sock.sendall(request)\n\n raw_response = b\"\"\n with Timeout(self.timeout) as timeout:\n while True:\n try:\n raw_response += sock.recv(4096)\n except socket.timeout:\n timeout.sleep(0)\n continue\n if raw_response == b\"\":\n timeout.sleep(0)\n elif has_valid_json_rpc_ending(raw_response):\n try:\n response = self.decode_rpc_response(raw_response)\n except JSONDecodeError:\n timeout.sleep(0)\n continue\n else:\n return response\n else:\n timeout.sleep(0)\n continue\n\n\n# A valid JSON RPC response can only end in } or ] http://www.jsonrpc.org/specification\ndef has_valid_json_rpc_ending(raw_response: bytes) -> bool:\n stripped_raw_response = raw_response.rstrip()\n for valid_ending in [b\"}\", b\"]\"]:\n if stripped_raw_response.endswith(valid_ending):\n return True\n else:\n return False\n", "path": "web3/providers/ipc.py"}]} | 2,561 | 547 |
gh_patches_debug_28692 | rasdani/github-patches | git_diff | spyder-ide__spyder-12435 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check handling of str responses in Kite client signals
Sorry for taking so long to respond. I've included everything that I thought might be relevant. I think @metalogical has it right; however, it would be pretty difficult for me to convince the security guys at work to change anything. Would it be possible to change the error handling so it is able to continue after an error like this? Especially since I have explicitly disabled Kite, and the software works fine if it encounters the error when it is already running.
2020-04-20 12:05:42,162 [DEBUG] [spyder.plugins.editor.plugin] -> Start completion server for C:\Users\[removed]\.spyder-py3\temp.py [Python]
2020-04-20 12:05:42,162 [DEBUG] [spyder.plugins.editor.plugin] -> Python completion server is ready
2020-04-20 12:05:42,164 [DEBUG] [spyder.plugins.editor.widgets.codeeditor] -> Completions services available for: C:\Users\[removed]\.spyder-py3\temp.py
2020-04-20 12:05:42,164 [DEBUG] [spyder.plugins.editor.plugin] -> python completion server request: 'textDocument/didOpen'
2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Completion plugin: Request 0 Got response from lsp
2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Gather responses for textDocument/didOpen
2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Completion plugin: Request 0 Got response from kite
2020-04-20 12:05:42,166 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Set focus to: C:\Users\[removed]\.spyder-py3\temp.py
2020-04-20 12:05:42,166 [DEBUG] [spyder.plugins.completion.fallback.actor] -> Got request id 0: textDocument/didOpen for file C:\Users\[removed]\.spyder-py3\temp.py
2020-04-20 12:05:42,167 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Current changed: 0 - C:\Users\[removed]\.spyder-py3\temp.py
2020-04-20 12:05:42,170 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Added thread <spyder.plugins.editor.widgets.editor.AnalysisThread object at 0x0000019B03720A68> to queue
2020-04-20 12:05:42,189 [INFO] [spyder.app.mainwindow] -> Launching code completion client for Python...
2020-04-20 12:05:42,203 [DEBUG] [spyder.plugins.completion.kite.client] -> Starting Kite HTTP session...
2020-04-20 12:05:42,208 [DEBUG] [urllib3.connectionpool] -> Starting new HTTP connection (1): [vpn server name].[company name].com:80
2020-04-20 12:05:42,512 [DEBUG] [urllib3.connectionpool] -> http://[vpn server name].[company name].com:80 "GET http://127.0.0.1:46624/clientapi/languages HTTP/1.1" 200 7277
Traceback (most recent call last):
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\app\mainwindow.py", line 3718, in main
mainwindow = run_spyder(app, options, args)
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\app\mainwindow.py", line 3559, in run_spyder
main.setup()
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\app\mainwindow.py", line 960, in setup
self.completions.start()
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\plugins\completion\plugin.py", line 292, in start
client_info['plugin'].start()
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\plugins\completion\kite\plugin.py", line 144, in start
self.client.start()
File "C:\Progra~1\Anaconda3\lib\site-packages\spyder\plugins\completion\kite\client.py", line 62, in start
self.sig_client_started.emit(self.languages)
TypeError: KiteClient.sig_client_started[list].emit(): argument 1 has unexpected type 'str'
_Originally posted by @nsluhrs in https://github.com/spyder-ide/spyder/issues/12357#issuecomment-616665440_
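For reference, a minimal sketch of the failure mode the traceback points at (assuming standard qtpy/PyQt signal semantics; `DemoClient` and its guard are made up for illustration and are not Spyder code): a signal declared with a `list` payload rejects a plain string at emit time with exactly this `TypeError`, so keeping the payload a list whenever the HTTP layer hands back text (for example an HTML error page from a proxy) avoids the crash.

```python
# Hypothetical illustration, not Spyder code: Signal(list) rejects a str payload.
from qtpy.QtCore import QObject, Signal


class DemoClient(QObject):
    sig_client_started = Signal(list)

    def start(self, languages):
        # Fall back to a known list when the response was plain text instead of JSON.
        if languages is None or isinstance(languages, str):
            languages = ['python']
        self.sig_client_started.emit(languages)


client = DemoClient()
client.start("<html>proxy error page</html>")  # emits ['python'] instead of raising
```

The patch shown further down takes the same approach, falling back to `['python']` in `get_languages()` whenever the response comes back as plain text.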
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spyder/plugins/completion/kite/client.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """Kite completions HTTP client."""
8
9 # Standard library imports
10 import logging
11 try:
12 from urllib import quote # Python 2
13 except ImportError:
14 from urllib.parse import quote # Python 3
15
16 # Third party imports
17 from qtpy.QtCore import QObject, QThread, Signal, QMutex
18 import requests
19
20 # Local imports
21 from spyder.config.base import _
22 from spyder.plugins.completion.kite import KITE_ENDPOINTS, KITE_REQUEST_MAPPING
23 from spyder.plugins.completion.kite.decorators import class_register
24 from spyder.plugins.completion.kite.providers import KiteMethodProviderMixIn
25 from spyder.plugins.completion.kite.utils.status import (
26 status, check_if_kite_running)
27 from spyder.py3compat import (
28 ConnectionError, ConnectionRefusedError, TEXT_TYPES)
29
30
31 logger = logging.getLogger(__name__)
32
33
34 @class_register
35 class KiteClient(QObject, KiteMethodProviderMixIn):
36 sig_response_ready = Signal(int, dict)
37 sig_client_started = Signal(list)
38 sig_client_not_responding = Signal()
39 sig_perform_request = Signal(int, str, object)
40 sig_perform_status_request = Signal(str)
41 sig_status_response_ready = Signal((str,), (dict,))
42 sig_perform_onboarding_request = Signal()
43 sig_onboarding_response_ready = Signal(str)
44
45 def __init__(self, parent, enable_code_snippets=True):
46 QObject.__init__(self, parent)
47 self.endpoint = None
48 self.requests = {}
49 self.languages = []
50 self.mutex = QMutex()
51 self.opened_files = {}
52 self.opened_files_status = {}
53 self.thread_started = False
54 self.enable_code_snippets = enable_code_snippets
55 self.thread = QThread()
56 self.moveToThread(self.thread)
57 self.thread.started.connect(self.started)
58 self.sig_perform_request.connect(self.perform_request)
59 self.sig_perform_status_request.connect(self.get_status)
60 self.sig_perform_onboarding_request.connect(self.get_onboarding_file)
61
62 def start(self):
63 if not self.thread_started:
64 self.thread.start()
65 logger.debug('Starting Kite HTTP session...')
66 self.endpoint = requests.Session()
67 self.languages = self.get_languages()
68 self.sig_client_started.emit(self.languages)
69
70 def started(self):
71 self.thread_started = True
72
73 def stop(self):
74 if self.thread_started:
75 logger.debug('Closing Kite HTTP session...')
76 self.endpoint.close()
77 self.thread.quit()
78
79 def get_languages(self):
80 verb, url = KITE_ENDPOINTS.LANGUAGES_ENDPOINT
81 success, response = self.perform_http_request(verb, url)
82 if response is None:
83 response = ['python']
84 return response
85
86 def _get_onboarding_file(self):
87 """Perform a request to get kite's onboarding file."""
88 verb, url = KITE_ENDPOINTS.ONBOARDING_ENDPOINT
89 success, response = self.perform_http_request(verb, url)
90 return response
91
92 def get_onboarding_file(self):
93 """Get onboarding file."""
94 onboarding_file = self._get_onboarding_file()
95 self.sig_onboarding_response_ready.emit(onboarding_file)
96
97 def _get_status(self, filename):
98 """Perform a request to get kite status for a file."""
99 verb, url = KITE_ENDPOINTS.STATUS_ENDPOINT
100 if filename:
101 url_params = {'filename': filename}
102 else:
103 url_params = {'filetype': 'python'}
104 success, response = self.perform_http_request(
105 verb, url, url_params=url_params)
106 return success, response
107
108 def get_status(self, filename):
109 """Get kite status for a given filename."""
110 success_status, kite_status = self._get_status(filename)
111 if not filename or kite_status is None:
112 kite_status = status()
113 self.sig_status_response_ready[str].emit(kite_status)
114 elif isinstance(kite_status, TEXT_TYPES):
115 if not success_status:
116 status_str = status(extra_status=' with errors')
117 long_str = _("<code>{error}</code><br><br>"
118 "Note: If you are using a VPN, "
119 "please don't route requests to "
120 "localhost/127.0.0.1 with it").format(
121 error=kite_status)
122 else:
123 status_str = status()
124 long_str = kite_status
125 kite_status_dict = {
126 'status': status_str,
127 'short': status_str,
128 'long': long_str}
129 self.sig_status_response_ready[dict].emit(kite_status_dict)
130 else:
131 self.sig_status_response_ready[dict].emit(kite_status)
132
133 def perform_http_request(self, verb, url, url_params=None, params=None):
134 response = None
135 http_method = getattr(self.endpoint, verb)
136 try:
137 http_response = http_method(url, params=url_params, json=params)
138 except Exception as error:
139 return False, None
140 success = http_response.status_code == 200
141 if success:
142 try:
143 response = http_response.json()
144 except Exception:
145 response = http_response.text
146 response = None if response == '' else response
147 return success, response
148
149 def send(self, method, params, url_params):
150 response = None
151 if self.endpoint is not None and method in KITE_REQUEST_MAPPING:
152 http_verb, path = KITE_REQUEST_MAPPING[method]
153 encoded_url_params = {
154 key: quote(value) if isinstance(value, TEXT_TYPES) else value
155 for (key, value) in url_params.items()}
156 path = path.format(**encoded_url_params)
157 try:
158 success, response = self.perform_http_request(
159 http_verb, path, params=params)
160 except (ConnectionRefusedError, ConnectionError):
161 return response
162 return response
163
164 def perform_request(self, req_id, method, params):
165 response = None
166 if method in self.sender_registry:
167 logger.debug('Perform {0} request with id {1}'.format(
168 method, req_id))
169 handler_name = self.sender_registry[method]
170 handler = getattr(self, handler_name)
171 response = handler(params)
172 if method in self.handler_registry:
173 converter_name = self.handler_registry[method]
174 converter = getattr(self, converter_name)
175 if response is not None:
176 response = converter(response)
177 self.sig_response_ready.emit(req_id, response or {})
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spyder/plugins/completion/kite/client.py b/spyder/plugins/completion/kite/client.py
--- a/spyder/plugins/completion/kite/client.py
+++ b/spyder/plugins/completion/kite/client.py
@@ -79,7 +79,7 @@
def get_languages(self):
verb, url = KITE_ENDPOINTS.LANGUAGES_ENDPOINT
success, response = self.perform_http_request(verb, url)
- if response is None:
+ if response is None or isinstance(response, TEXT_TYPES):
response = ['python']
return response
@@ -112,16 +112,12 @@
kite_status = status()
self.sig_status_response_ready[str].emit(kite_status)
elif isinstance(kite_status, TEXT_TYPES):
- if not success_status:
- status_str = status(extra_status=' with errors')
- long_str = _("<code>{error}</code><br><br>"
- "Note: If you are using a VPN, "
- "please don't route requests to "
- "localhost/127.0.0.1 with it").format(
- error=kite_status)
- else:
- status_str = status()
- long_str = kite_status
+ status_str = status(extra_status=' with errors')
+ long_str = _("<code>{error}</code><br><br>"
+ "Note: If you are using a VPN, "
+ "please don't route requests to "
+ "localhost/127.0.0.1 with it").format(
+ error=kite_status)
kite_status_dict = {
'status': status_str,
'short': status_str,
| {"golden_diff": "diff --git a/spyder/plugins/completion/kite/client.py b/spyder/plugins/completion/kite/client.py\n--- a/spyder/plugins/completion/kite/client.py\n+++ b/spyder/plugins/completion/kite/client.py\n@@ -79,7 +79,7 @@\n def get_languages(self):\n verb, url = KITE_ENDPOINTS.LANGUAGES_ENDPOINT\n success, response = self.perform_http_request(verb, url)\n- if response is None:\n+ if response is None or isinstance(response, TEXT_TYPES):\n response = ['python']\n return response\n \n@@ -112,16 +112,12 @@\n kite_status = status()\n self.sig_status_response_ready[str].emit(kite_status)\n elif isinstance(kite_status, TEXT_TYPES):\n- if not success_status:\n- status_str = status(extra_status=' with errors')\n- long_str = _(\"<code>{error}</code><br><br>\"\n- \"Note: If you are using a VPN, \"\n- \"please don't route requests to \"\n- \"localhost/127.0.0.1 with it\").format(\n- error=kite_status)\n- else:\n- status_str = status()\n- long_str = kite_status\n+ status_str = status(extra_status=' with errors')\n+ long_str = _(\"<code>{error}</code><br><br>\"\n+ \"Note: If you are using a VPN, \"\n+ \"please don't route requests to \"\n+ \"localhost/127.0.0.1 with it\").format(\n+ error=kite_status)\n kite_status_dict = {\n 'status': status_str,\n 'short': status_str,\n", "issue": "Check handling of str responses in Kite client signals\nSorry for taking so long to respond. I've included everything that I thought might be relevant. I think @metalogical has it right, however I it would be pretty difficult for me to convince the security guys at work to change anything. Would it be possible to change the error handling so it is able to continue after an error like this? Especially since I have explicitly disabled Kite and the software works fine if it encounters the error when already running. 
\r\n\r\n\t2020-04-20 12:05:42,162 [DEBUG] [spyder.plugins.editor.plugin] -> Start completion server for C:\\Users\\[removed]\\.spyder-py3\\temp.py [Python]\r\n\t2020-04-20 12:05:42,162 [DEBUG] [spyder.plugins.editor.plugin] -> Python completion server is ready\r\n\t2020-04-20 12:05:42,164 [DEBUG] [spyder.plugins.editor.widgets.codeeditor] -> Completions services available for: C:\\Users\\[removed]\\.spyder-py3\\temp.py\r\n\t2020-04-20 12:05:42,164 [DEBUG] [spyder.plugins.editor.plugin] -> python completion server request: 'textDocument/didOpen'\r\n\t2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Completion plugin: Request 0 Got response from lsp\r\n\t2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Gather responses for textDocument/didOpen\r\n\t2020-04-20 12:05:42,165 [DEBUG] [spyder.plugins.completion.plugin] -> Completion plugin: Request 0 Got response from kite\r\n\t2020-04-20 12:05:42,166 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Set focus to: C:\\Users\\[removed]\\.spyder-py3\\temp.py\r\n\t2020-04-20 12:05:42,166 [DEBUG] [spyder.plugins.completion.fallback.actor] -> Got request id 0: textDocument/didOpen for file C:\\Users\\[removed]\\.spyder-py3\\temp.py\r\n\t2020-04-20 12:05:42,167 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Current changed: 0 - C:\\Users\\[removed]\\.spyder-py3\\temp.py\r\n\t2020-04-20 12:05:42,170 [DEBUG] [spyder.plugins.editor.widgets.editor] -> Added thread <spyder.plugins.editor.widgets.editor.AnalysisThread object at 0x0000019B03720A68> to queue\r\n\t2020-04-20 12:05:42,189 [INFO] [spyder.app.mainwindow] -> Launching code completion client for Python...\r\n\t2020-04-20 12:05:42,203 [DEBUG] [spyder.plugins.completion.kite.client] -> Starting Kite HTTP session...\r\n\t2020-04-20 12:05:42,208 [DEBUG] [urllib3.connectionpool] -> Starting new HTTP connection (1): [vpn server name].[company name].com:80\r\n\t2020-04-20 12:05:42,512 [DEBUG] [urllib3.connectionpool] -> http://[vpn server name].[company name].com:80 \"GET http://127.0.0.1:46624/clientapi/languages HTTP/1.1\" 200 7277\r\n\tTraceback (most recent call last):\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\app\\mainwindow.py\", line 3718, in main\r\n\t\tmainwindow = run_spyder(app, options, args)\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\app\\mainwindow.py\", line 3559, in run_spyder\r\n\t\tmain.setup()\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\app\\mainwindow.py\", line 960, in setup\r\n\t\tself.completions.start()\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\plugins\\completion\\plugin.py\", line 292, in start\r\n\t\tclient_info['plugin'].start()\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\plugins\\completion\\kite\\plugin.py\", line 144, in start\r\n\t\tself.client.start()\r\n\t File \"C:\\Progra~1\\Anaconda3\\lib\\site-packages\\spyder\\plugins\\completion\\kite\\client.py\", line 62, in start\r\n\t\tself.sig_client_started.emit(self.languages)\r\n\tTypeError: KiteClient.sig_client_started[list].emit(): argument 1 has unexpected type 'str'\r\n\r\n_Originally posted by @nsluhrs in https://github.com/spyder-ide/spyder/issues/12357#issuecomment-616665440_\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Kite completions HTTP client.\"\"\"\n\n# Standard library imports\nimport 
logging\ntry:\n from urllib import quote # Python 2\nexcept ImportError:\n from urllib.parse import quote # Python 3\n\n# Third party imports\nfrom qtpy.QtCore import QObject, QThread, Signal, QMutex\nimport requests\n\n# Local imports\nfrom spyder.config.base import _\nfrom spyder.plugins.completion.kite import KITE_ENDPOINTS, KITE_REQUEST_MAPPING\nfrom spyder.plugins.completion.kite.decorators import class_register\nfrom spyder.plugins.completion.kite.providers import KiteMethodProviderMixIn\nfrom spyder.plugins.completion.kite.utils.status import (\n status, check_if_kite_running)\nfrom spyder.py3compat import (\n ConnectionError, ConnectionRefusedError, TEXT_TYPES)\n\n\nlogger = logging.getLogger(__name__)\n\n\n@class_register\nclass KiteClient(QObject, KiteMethodProviderMixIn):\n sig_response_ready = Signal(int, dict)\n sig_client_started = Signal(list)\n sig_client_not_responding = Signal()\n sig_perform_request = Signal(int, str, object)\n sig_perform_status_request = Signal(str)\n sig_status_response_ready = Signal((str,), (dict,))\n sig_perform_onboarding_request = Signal()\n sig_onboarding_response_ready = Signal(str)\n\n def __init__(self, parent, enable_code_snippets=True):\n QObject.__init__(self, parent)\n self.endpoint = None\n self.requests = {}\n self.languages = []\n self.mutex = QMutex()\n self.opened_files = {}\n self.opened_files_status = {}\n self.thread_started = False\n self.enable_code_snippets = enable_code_snippets\n self.thread = QThread()\n self.moveToThread(self.thread)\n self.thread.started.connect(self.started)\n self.sig_perform_request.connect(self.perform_request)\n self.sig_perform_status_request.connect(self.get_status)\n self.sig_perform_onboarding_request.connect(self.get_onboarding_file)\n\n def start(self):\n if not self.thread_started:\n self.thread.start()\n logger.debug('Starting Kite HTTP session...')\n self.endpoint = requests.Session()\n self.languages = self.get_languages()\n self.sig_client_started.emit(self.languages)\n\n def started(self):\n self.thread_started = True\n\n def stop(self):\n if self.thread_started:\n logger.debug('Closing Kite HTTP session...')\n self.endpoint.close()\n self.thread.quit()\n\n def get_languages(self):\n verb, url = KITE_ENDPOINTS.LANGUAGES_ENDPOINT\n success, response = self.perform_http_request(verb, url)\n if response is None:\n response = ['python']\n return response\n\n def _get_onboarding_file(self):\n \"\"\"Perform a request to get kite's onboarding file.\"\"\"\n verb, url = KITE_ENDPOINTS.ONBOARDING_ENDPOINT\n success, response = self.perform_http_request(verb, url)\n return response\n\n def get_onboarding_file(self):\n \"\"\"Get onboarding file.\"\"\"\n onboarding_file = self._get_onboarding_file()\n self.sig_onboarding_response_ready.emit(onboarding_file)\n\n def _get_status(self, filename):\n \"\"\"Perform a request to get kite status for a file.\"\"\"\n verb, url = KITE_ENDPOINTS.STATUS_ENDPOINT\n if filename:\n url_params = {'filename': filename}\n else:\n url_params = {'filetype': 'python'}\n success, response = self.perform_http_request(\n verb, url, url_params=url_params)\n return success, response\n\n def get_status(self, filename):\n \"\"\"Get kite status for a given filename.\"\"\"\n success_status, kite_status = self._get_status(filename)\n if not filename or kite_status is None:\n kite_status = status()\n self.sig_status_response_ready[str].emit(kite_status)\n elif isinstance(kite_status, TEXT_TYPES):\n if not success_status:\n status_str = status(extra_status=' with errors')\n long_str = 
_(\"<code>{error}</code><br><br>\"\n \"Note: If you are using a VPN, \"\n \"please don't route requests to \"\n \"localhost/127.0.0.1 with it\").format(\n error=kite_status)\n else:\n status_str = status()\n long_str = kite_status\n kite_status_dict = {\n 'status': status_str,\n 'short': status_str,\n 'long': long_str}\n self.sig_status_response_ready[dict].emit(kite_status_dict)\n else:\n self.sig_status_response_ready[dict].emit(kite_status)\n\n def perform_http_request(self, verb, url, url_params=None, params=None):\n response = None\n http_method = getattr(self.endpoint, verb)\n try:\n http_response = http_method(url, params=url_params, json=params)\n except Exception as error:\n return False, None\n success = http_response.status_code == 200\n if success:\n try:\n response = http_response.json()\n except Exception:\n response = http_response.text\n response = None if response == '' else response\n return success, response\n\n def send(self, method, params, url_params):\n response = None\n if self.endpoint is not None and method in KITE_REQUEST_MAPPING:\n http_verb, path = KITE_REQUEST_MAPPING[method]\n encoded_url_params = {\n key: quote(value) if isinstance(value, TEXT_TYPES) else value\n for (key, value) in url_params.items()}\n path = path.format(**encoded_url_params)\n try:\n success, response = self.perform_http_request(\n http_verb, path, params=params)\n except (ConnectionRefusedError, ConnectionError):\n return response\n return response\n\n def perform_request(self, req_id, method, params):\n response = None\n if method in self.sender_registry:\n logger.debug('Perform {0} request with id {1}'.format(\n method, req_id))\n handler_name = self.sender_registry[method]\n handler = getattr(self, handler_name)\n response = handler(params)\n if method in self.handler_registry:\n converter_name = self.handler_registry[method]\n converter = getattr(self, converter_name)\n if response is not None:\n response = converter(response)\n self.sig_response_ready.emit(req_id, response or {})\n", "path": "spyder/plugins/completion/kite/client.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Kite completions HTTP client.\"\"\"\n\n# Standard library imports\nimport logging\ntry:\n from urllib import quote # Python 2\nexcept ImportError:\n from urllib.parse import quote # Python 3\n\n# Third party imports\nfrom qtpy.QtCore import QObject, QThread, Signal, QMutex\nimport requests\n\n# Local imports\nfrom spyder.config.base import _\nfrom spyder.plugins.completion.kite import KITE_ENDPOINTS, KITE_REQUEST_MAPPING\nfrom spyder.plugins.completion.kite.decorators import class_register\nfrom spyder.plugins.completion.kite.providers import KiteMethodProviderMixIn\nfrom spyder.plugins.completion.kite.utils.status import (\n status, check_if_kite_running)\nfrom spyder.py3compat import (\n ConnectionError, ConnectionRefusedError, TEXT_TYPES)\n\n\nlogger = logging.getLogger(__name__)\n\n\n@class_register\nclass KiteClient(QObject, KiteMethodProviderMixIn):\n sig_response_ready = Signal(int, dict)\n sig_client_started = Signal(list)\n sig_client_not_responding = Signal()\n sig_perform_request = Signal(int, str, object)\n sig_perform_status_request = Signal(str)\n sig_status_response_ready = Signal((str,), (dict,))\n sig_perform_onboarding_request = Signal()\n sig_onboarding_response_ready = Signal(str)\n\n def __init__(self, parent, 
enable_code_snippets=True):\n QObject.__init__(self, parent)\n self.endpoint = None\n self.requests = {}\n self.languages = []\n self.mutex = QMutex()\n self.opened_files = {}\n self.opened_files_status = {}\n self.thread_started = False\n self.enable_code_snippets = enable_code_snippets\n self.thread = QThread()\n self.moveToThread(self.thread)\n self.thread.started.connect(self.started)\n self.sig_perform_request.connect(self.perform_request)\n self.sig_perform_status_request.connect(self.get_status)\n self.sig_perform_onboarding_request.connect(self.get_onboarding_file)\n\n def start(self):\n if not self.thread_started:\n self.thread.start()\n logger.debug('Starting Kite HTTP session...')\n self.endpoint = requests.Session()\n self.languages = self.get_languages()\n self.sig_client_started.emit(self.languages)\n\n def started(self):\n self.thread_started = True\n\n def stop(self):\n if self.thread_started:\n logger.debug('Closing Kite HTTP session...')\n self.endpoint.close()\n self.thread.quit()\n\n def get_languages(self):\n verb, url = KITE_ENDPOINTS.LANGUAGES_ENDPOINT\n success, response = self.perform_http_request(verb, url)\n if response is None or isinstance(response, TEXT_TYPES):\n response = ['python']\n return response\n\n def _get_onboarding_file(self):\n \"\"\"Perform a request to get kite's onboarding file.\"\"\"\n verb, url = KITE_ENDPOINTS.ONBOARDING_ENDPOINT\n success, response = self.perform_http_request(verb, url)\n return response\n\n def get_onboarding_file(self):\n \"\"\"Get onboarding file.\"\"\"\n onboarding_file = self._get_onboarding_file()\n self.sig_onboarding_response_ready.emit(onboarding_file)\n\n def _get_status(self, filename):\n \"\"\"Perform a request to get kite status for a file.\"\"\"\n verb, url = KITE_ENDPOINTS.STATUS_ENDPOINT\n if filename:\n url_params = {'filename': filename}\n else:\n url_params = {'filetype': 'python'}\n success, response = self.perform_http_request(\n verb, url, url_params=url_params)\n return success, response\n\n def get_status(self, filename):\n \"\"\"Get kite status for a given filename.\"\"\"\n success_status, kite_status = self._get_status(filename)\n if not filename or kite_status is None:\n kite_status = status()\n self.sig_status_response_ready[str].emit(kite_status)\n elif isinstance(kite_status, TEXT_TYPES):\n status_str = status(extra_status=' with errors')\n long_str = _(\"<code>{error}</code><br><br>\"\n \"Note: If you are using a VPN, \"\n \"please don't route requests to \"\n \"localhost/127.0.0.1 with it\").format(\n error=kite_status)\n kite_status_dict = {\n 'status': status_str,\n 'short': status_str,\n 'long': long_str}\n self.sig_status_response_ready[dict].emit(kite_status_dict)\n else:\n self.sig_status_response_ready[dict].emit(kite_status)\n\n def perform_http_request(self, verb, url, url_params=None, params=None):\n response = None\n http_method = getattr(self.endpoint, verb)\n try:\n http_response = http_method(url, params=url_params, json=params)\n except Exception as error:\n return False, None\n success = http_response.status_code == 200\n if success:\n try:\n response = http_response.json()\n except Exception:\n response = http_response.text\n response = None if response == '' else response\n return success, response\n\n def send(self, method, params, url_params):\n response = None\n if self.endpoint is not None and method in KITE_REQUEST_MAPPING:\n http_verb, path = KITE_REQUEST_MAPPING[method]\n encoded_url_params = {\n key: quote(value) if isinstance(value, TEXT_TYPES) else value\n for (key, 
value) in url_params.items()}\n path = path.format(**encoded_url_params)\n try:\n success, response = self.perform_http_request(\n http_verb, path, params=params)\n except (ConnectionRefusedError, ConnectionError):\n return response\n return response\n\n def perform_request(self, req_id, method, params):\n response = None\n if method in self.sender_registry:\n logger.debug('Perform {0} request with id {1}'.format(\n method, req_id))\n handler_name = self.sender_registry[method]\n handler = getattr(self, handler_name)\n response = handler(params)\n if method in self.handler_registry:\n converter_name = self.handler_registry[method]\n converter = getattr(self, converter_name)\n if response is not None:\n response = converter(response)\n self.sig_response_ready.emit(req_id, response or {})\n", "path": "spyder/plugins/completion/kite/client.py"}]} | 3,372 | 376 |
gh_patches_debug_10681 | rasdani/github-patches | git_diff | translate__translate-3472 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
po2prop is not working as expected
I've spent some days on this by now and I can't seem to find any solution.
I have a .strings file from my iOS app and I'm able to convert it successfully to a PO file with prop2po using this command.
`prop2po --encoding utf8 -t ../en.lproj/Localizable.strings Localizable.strings es.po`
I'm executing this command inside `/Strings/es.lproj`, where the Localizable.strings file is the one containing the Spanish translations.
So it creates an es.po file with the msgids from the template and the translations from the Localizable.strings in the current path.
Then, when I try to convert my es.po back to a new Localizable.strings with the new translations, I always get the template.
I'm using this command:
`po2prop --encoding utf8 -i es.po -o new.strings -t ../en.lproj/Localizable.strings`
new.strings is always the same as the template I'm using, instead of containing the translations from es.po.
Also, I think there is something wrong with the gettext file parser, as I have to delete these two lines from the generated es.po file in order to make it work.
`#, fuzzy`
`msgid ""` <--
`msgstr ""` <--
I've included the commented fuzzy line as well, but that line does not cause trouble.
If I don't delete these two lines, then when I try to use the po2prop command I get this error:
`template ../en.lproj/Localizable.strings: 'NoneType' object has no attribute 'istranslated'`
It is quite misleading, as it seems that it has a problem with the template, but after many tries I managed to figure out that the problem was in the PO file (or maybe not, and I'm missing something).
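For what it's worth, the message appears to come from the converter looking a unit up in the PO store's location index and calling `istranslated()` on it; the sketch below (the reason the index ends up holding `None` is an assumption here, not something I have verified) shows the failure and the kind of guard that would let the conversion continue:

```python
# Sketch only: if the location index maps a key to None, the unguarded call
# fails with exactly the error quoted above.
locationindex = {"SOME_KEY": None}   # hypothetical index state

unit = locationindex["SOME_KEY"]
# unit.istranslated()  # AttributeError: 'NoneType' object has no attribute 'istranslated'

# Checking for None first lets the converter skip the entry instead of crashing:
if unit is None or not unit.istranslated():
    pass  # fall back to the template line / drop the entry
```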
I'm using the 2.0.0b2 version.
I hope someone can tell me what I'm doing wrong... or if this is actually a bug, since I've tried many ways to convert with po2prop using all the available settings and I'm not able to get it right.
Supposedly this should work...
http://stackoverflow.com/questions/13939766/are-there-any-good-tools-for-ios-pseudo-localization
Or this:
`po2prop -t some.properties some.po some-other.properties` from http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/prop2po.html
Thanks,
Rubén.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `translate/convert/po2prop.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2002-2006 Zuza Software Foundation
5 #
6 # This file is part of translate.
7 #
8 # translate is free software; you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation; either version 2 of the License, or
11 # (at your option) any later version.
12 #
13 # translate is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with this program; if not, see <http://www.gnu.org/licenses/>.
20
21 """Convert Gettext PO localization files to Java/Mozilla .properties files.
22
23 See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/prop2po.html
24 for examples and usage instructions.
25 """
26
27 import six
28 import warnings
29
30 from translate.convert import accesskey, convert
31 from translate.misc import quote
32 from translate.storage import po, properties
33
34
35 eol = u"\n"
36
37
38 def applytranslation(key, propunit, inunit, mixedkeys):
39 """applies the translation for key in the po unit to the prop unit"""
40 # this converts the po-style string to a prop-style string
41 value = inunit.target
42 # handle mixed keys
43 for labelsuffix in properties.labelsuffixes:
44 if key.endswith(labelsuffix):
45 if key in mixedkeys:
46 value, akey = accesskey.extract(value)
47 break
48 else:
49 for akeysuffix in properties.accesskeysuffixes:
50 if key.endswith(akeysuffix):
51 if key in mixedkeys:
52 label, value = accesskey.extract(value)
53 if not value:
54 warnings.warn("Could not find accesskey for %s" % key)
55 # Use the source language accesskey
56 label, value = accesskey.extract(inunit.source)
57 else:
58 original = propunit.source
59 # For the sake of diffs we keep the case of the
60 # accesskey the same if we know the translation didn't
61 # change. Casing matters in XUL.
62 if value == propunit.source and original.lower() == value.lower():
63 if original.isupper():
64 value = value.upper()
65 elif original.islower():
66 value = value.lower()
67 return value
68
69
70 class reprop(object):
71
72 def __init__(self, templatefile, inputstore, personality, encoding=None,
73 remove_untranslated=False):
74 self.templatefile = templatefile
75 self.inputstore = inputstore
76 self.personality = properties.get_dialect(personality)
77 self.encoding = encoding
78 if self.encoding is None:
79 self.encoding = self.personality.default_encoding
80 self.remove_untranslated = remove_untranslated
81 self.mixer = accesskey.UnitMixer(properties.labelsuffixes,
82 properties.accesskeysuffixes)
83
84 def convertstore(self, includefuzzy=False):
85 self.includefuzzy = includefuzzy
86 self.inmultilinemsgid = False
87 self.inecho = False
88 self.inputstore.makeindex()
89 if self.personality.name == "gaia":
90 self._explode_gaia_plurals()
91 outputlines = []
92 # Readlines doesn't work for UTF-16, we read() and splitlines(keepends) instead
93 content = self.templatefile.read().decode(self.encoding)
94 for line in content.splitlines(True):
95 outputstr = self.convertline(line)
96 outputlines.append(outputstr)
97 return u"".join(outputlines).encode(self.encoding)
98
99 def _handle_accesskeys(self, inunit, currkey):
100 value = inunit.target
101 if self.personality.name == "mozilla":
102 keys = inunit.getlocations()
103 mixedkeys = self.mixer.match_entities(keys)
104 for key in keys:
105 if key == currkey and key in self.inputstore.locationindex:
106 propunit = self.inputstore.locationindex[key] # find the prop
107 value = applytranslation(key, propunit, inunit, mixedkeys)
108 break
109
110 return value
111
112 def _explode_gaia_plurals(self):
113 """Explode the gaia plurals."""
114 from translate.lang import data
115 for unit in self.inputstore.units:
116 if not unit.hasplural():
117 continue
118 if unit.isfuzzy() and not self.includefuzzy or not unit.istranslated():
119 continue
120
121 names = data.cldr_plural_categories
122 location = unit.getlocations()[0]
123 for category, text in zip(names, unit.target.strings):
124 # TODO: for now we assume all forms are present. We need to
125 # fill in the rest after mapping things to the proper CLDR names.
126 if category == 'zero':
127 # [zero] cases are translated as separate units
128 continue
129 new_unit = self.inputstore.addsourceunit(u"fish") # not used
130 new_location = '%s[%s]' % (location, category)
131 new_unit.addlocation(new_location)
132 new_unit.target = text
133 self.inputstore.locationindex[new_location] = new_unit
134
135 # We don't want the plural marker to be translated:
136 del self.inputstore.locationindex[location]
137
138 def convertline(self, line):
139 returnline = u""
140 # handle multiline msgid if we're in one
141 if self.inmultilinemsgid:
142 msgid = quote.rstripeol(line).strip()
143 # see if there's more
144 self.inmultilinemsgid = (msgid[-1:] == '\\')
145 # if we're echoing...
146 if self.inecho:
147 returnline = line
148 # otherwise, this could be a comment
149 elif line.strip()[:1] == '#':
150 returnline = quote.rstripeol(line) + eol
151 else:
152 line = quote.rstripeol(line)
153 delimiter_char, delimiter_pos = self.personality.find_delimiter(line)
154 if quote.rstripeol(line)[-1:] == '\\':
155 self.inmultilinemsgid = True
156 if delimiter_pos == -1:
157 key = self.personality.key_strip(line)
158 delimiter = " %s " % self.personality.delimiters[0]
159 else:
160 key = self.personality.key_strip(line[:delimiter_pos])
161 # Calculate space around the equal sign
162 prespace = line[line.find(' ', len(key)):delimiter_pos]
163 postspacestart = len(line[delimiter_pos+1:])
164 postspaceend = len(line[delimiter_pos+1:].lstrip())
165 postspace = line[delimiter_pos+1:delimiter_pos+(postspacestart-postspaceend)+1]
166 delimiter = prespace + delimiter_char + postspace
167 if key in self.inputstore.locationindex:
168 unit = self.inputstore.locationindex[key]
169 if not unit.istranslated() and bool(unit.source) and self.remove_untranslated:
170 returnline = u""
171 self.inecho = False
172 else:
173 if unit.isfuzzy() and not self.includefuzzy or len(unit.target) == 0:
174 value = unit.source
175 else:
176 value = self._handle_accesskeys(unit, key)
177 self.inecho = False
178 assert isinstance(value, six.text_type)
179 returnline = "%(key)s%(del)s%(value)s%(term)s%(eol)s" % {
180 "key": "%s%s%s" % (self.personality.key_wrap_char,
181 key,
182 self.personality.key_wrap_char),
183 "del": delimiter,
184 "value": "%s%s%s" % (self.personality.value_wrap_char,
185 self.personality.encode(value),
186 self.personality.value_wrap_char),
187 "term": self.personality.pair_terminator,
188 "eol": eol,
189 }
190 else:
191 self.inecho = True
192 returnline = line + eol
193 assert isinstance(returnline, six.text_type)
194 return returnline
195
196
197 def convertstrings(inputfile, outputfile, templatefile, personality="strings",
198 includefuzzy=False, encoding=None, outputthreshold=None,
199 remove_untranslated=False):
200 """.strings specific convertor function"""
201 return convertprop(inputfile, outputfile, templatefile,
202 personality="strings", includefuzzy=includefuzzy,
203 encoding=encoding, outputthreshold=outputthreshold,
204 remove_untranslated=remove_untranslated)
205
206
207 def convertmozillaprop(inputfile, outputfile, templatefile,
208 includefuzzy=False, remove_untranslated=False,
209 outputthreshold=None):
210 """Mozilla specific convertor function"""
211 return convertprop(inputfile, outputfile, templatefile,
212 personality="mozilla", includefuzzy=includefuzzy,
213 remove_untranslated=remove_untranslated,
214 outputthreshold=outputthreshold)
215
216
217 def convertprop(inputfile, outputfile, templatefile, personality="java",
218 includefuzzy=False, encoding=None, remove_untranslated=False,
219 outputthreshold=None):
220 inputstore = po.pofile(inputfile)
221
222 if not convert.should_output_store(inputstore, outputthreshold):
223 return False
224
225 if templatefile is None:
226 raise ValueError("must have template file for properties files")
227 # convertor = po2prop()
228 else:
229 convertor = reprop(templatefile, inputstore, personality, encoding,
230 remove_untranslated)
231 outputprop = convertor.convertstore(includefuzzy)
232 outputfile.write(outputprop)
233 return True
234
235
236 formats = {
237 ("po", "properties"): ("properties", convertprop),
238 ("po", "lang"): ("lang", convertprop),
239 ("po", "strings"): ("strings", convertstrings),
240 }
241
242
243 def main(argv=None):
244 # handle command line options
245 parser = convert.ConvertOptionParser(formats, usetemplates=True,
246 description=__doc__)
247 parser.add_option(
248 "", "--personality", dest="personality",
249 default=properties.default_dialect, type="choice",
250 choices=list(properties.dialects.keys()),
251 help="override the input file format: %s (for .properties files, default: %s)" % (
252 ", ".join(six.iterkeys(properties.dialects)), properties.default_dialect),
253 metavar="TYPE")
254 parser.add_option(
255 "", "--encoding", dest="encoding", default=None,
256 help="override the encoding set by the personality",
257 metavar="ENCODING")
258 parser.add_option(
259 "", "--removeuntranslated", dest="remove_untranslated",
260 default=False, action="store_true",
261 help="remove key value from output if it is untranslated")
262 parser.add_threshold_option()
263 parser.add_fuzzy_option()
264 parser.passthrough.append("personality")
265 parser.passthrough.append("encoding")
266 parser.passthrough.append("remove_untranslated")
267 parser.run(argv)
268
269 if __name__ == '__main__':
270 main()
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/translate/convert/po2prop.py b/translate/convert/po2prop.py
--- a/translate/convert/po2prop.py
+++ b/translate/convert/po2prop.py
@@ -166,7 +166,7 @@
delimiter = prespace + delimiter_char + postspace
if key in self.inputstore.locationindex:
unit = self.inputstore.locationindex[key]
- if not unit.istranslated() and bool(unit.source) and self.remove_untranslated:
+ if unit is None or not unit.istranslated() and bool(unit.source) and self.remove_untranslated:
returnline = u""
self.inecho = False
else:
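A short note on the guarded condition in the patch above (a sketch of its effect, not part of the patch itself): since `or` binds more loosely than `and`, a `None` entry in the location index makes the whole test true on its own, so the corresponding template line is dropped from the output instead of crashing the conversion.

```python
# Precedence sketch: with unit None, the test is True regardless of the
# remove_untranslated flag, so returnline stays empty for that key.
unit = None
remove_untranslated = False
returnline = u"some original template line"
if unit is None or (not unit.istranslated() and bool(unit.source) and remove_untranslated):
    returnline = u""
```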
| {"golden_diff": "diff --git a/translate/convert/po2prop.py b/translate/convert/po2prop.py\n--- a/translate/convert/po2prop.py\n+++ b/translate/convert/po2prop.py\n@@ -166,7 +166,7 @@\n delimiter = prespace + delimiter_char + postspace\n if key in self.inputstore.locationindex:\n unit = self.inputstore.locationindex[key]\n- if not unit.istranslated() and bool(unit.source) and self.remove_untranslated:\n+ if unit is None or not unit.istranslated() and bool(unit.source) and self.remove_untranslated:\n returnline = u\"\"\n self.inecho = False\n else:\n", "issue": "po2prop is not working as expected\nI've spend some days by now and I can't seem to find any solution.\n\nI have a strings file from my ios app and I'm able to convert it successfully to a po using prop2po using this command.\n\n`prop2po --encoding utf8 -t ../en.lproj/Localizable.strings Localizable.strings es.po`\n\nI'm executing this command inside the `/Strings/es.lproj` where the Localizables.strings is the one having the Spanish translations.\n\nSo it creates a es.po file with the msgid form the template and the translations from the Localizables.strings from the current path.\n\nThen when I try to convert back my es.po to a new Localizables.strings with the new translations I'm getting always the template.\n\nI'm using this command:\n`po2prop --encoding utf8 -i es.po -o new.strings -t ../en.lproj/Localizable.strings`\n\nnew.strings is always the same as the template that I'm using instead of having the translation from the es.po.\n\nAlso I think there is something wrong with the gettext file parser as I have to delete this two lines from the es.po generated file in order to work.\n`#, fuzzy`\n`msgid \"\"` <--\n`msgstr \"\"` <--\n\nI've added the commented fuzzy but that line does not creates trouble.\n\nIf I don't delete theese two lines when I try to use the po2prop command I get this error:\n`template ../en.lproj/Localizable.strings: 'NoneType' object has no attribute 'istranslated'`\n\nIs quite misleading as it seems that is has a problem with the template but after many tries I manage to figure that the problem was in the po file.(Or maybe not and I'm missing something).\n\nI'm using the 2.0.0b2 version.\n\nI hope someone can tell me what I'm doing wrong..or if this is actually a bug since I've tried many ways to convert from po2prop using all the settings available and I'm not able to do it right.\n\nSupposedly this should work...\n\nhttp://stackoverflow.com/questions/13939766/are-there-any-good-tools-for-ios-pseudo-localization\n\nOr this : \n\n`po2prop -t some.properties some.po some-other.properties` from http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/prop2po.html\n\nThanks,\nRub\u00e9n.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2002-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Convert Gettext PO localization files to Java/Mozilla .properties files.\n\nSee: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/prop2po.html\nfor examples and usage instructions.\n\"\"\"\n\nimport six\nimport warnings\n\nfrom translate.convert import accesskey, convert\nfrom translate.misc import quote\nfrom translate.storage import po, properties\n\n\neol = u\"\\n\"\n\n\ndef applytranslation(key, propunit, inunit, mixedkeys):\n \"\"\"applies the translation for key in the po unit to the prop unit\"\"\"\n # this converts the po-style string to a prop-style string\n value = inunit.target\n # handle mixed keys\n for labelsuffix in properties.labelsuffixes:\n if key.endswith(labelsuffix):\n if key in mixedkeys:\n value, akey = accesskey.extract(value)\n break\n else:\n for akeysuffix in properties.accesskeysuffixes:\n if key.endswith(akeysuffix):\n if key in mixedkeys:\n label, value = accesskey.extract(value)\n if not value:\n warnings.warn(\"Could not find accesskey for %s\" % key)\n # Use the source language accesskey\n label, value = accesskey.extract(inunit.source)\n else:\n original = propunit.source\n # For the sake of diffs we keep the case of the\n # accesskey the same if we know the translation didn't\n # change. Casing matters in XUL.\n if value == propunit.source and original.lower() == value.lower():\n if original.isupper():\n value = value.upper()\n elif original.islower():\n value = value.lower()\n return value\n\n\nclass reprop(object):\n\n def __init__(self, templatefile, inputstore, personality, encoding=None,\n remove_untranslated=False):\n self.templatefile = templatefile\n self.inputstore = inputstore\n self.personality = properties.get_dialect(personality)\n self.encoding = encoding\n if self.encoding is None:\n self.encoding = self.personality.default_encoding\n self.remove_untranslated = remove_untranslated\n self.mixer = accesskey.UnitMixer(properties.labelsuffixes,\n properties.accesskeysuffixes)\n\n def convertstore(self, includefuzzy=False):\n self.includefuzzy = includefuzzy\n self.inmultilinemsgid = False\n self.inecho = False\n self.inputstore.makeindex()\n if self.personality.name == \"gaia\":\n self._explode_gaia_plurals()\n outputlines = []\n # Readlines doesn't work for UTF-16, we read() and splitlines(keepends) instead\n content = self.templatefile.read().decode(self.encoding)\n for line in content.splitlines(True):\n outputstr = self.convertline(line)\n outputlines.append(outputstr)\n return u\"\".join(outputlines).encode(self.encoding)\n\n def _handle_accesskeys(self, inunit, currkey):\n value = inunit.target\n if self.personality.name == \"mozilla\":\n keys = inunit.getlocations()\n mixedkeys = self.mixer.match_entities(keys)\n for key in keys:\n if key == currkey and key in self.inputstore.locationindex:\n propunit = self.inputstore.locationindex[key] # find the prop\n value = applytranslation(key, propunit, inunit, mixedkeys)\n break\n\n return value\n\n def _explode_gaia_plurals(self):\n \"\"\"Explode the gaia plurals.\"\"\"\n from translate.lang import data\n for unit in self.inputstore.units:\n if not unit.hasplural():\n continue\n if unit.isfuzzy() and not self.includefuzzy or not unit.istranslated():\n continue\n\n names = data.cldr_plural_categories\n location = unit.getlocations()[0]\n for category, text in 
zip(names, unit.target.strings):\n # TODO: for now we assume all forms are present. We need to\n # fill in the rest after mapping things to the proper CLDR names.\n if category == 'zero':\n # [zero] cases are translated as separate units\n continue\n new_unit = self.inputstore.addsourceunit(u\"fish\") # not used\n new_location = '%s[%s]' % (location, category)\n new_unit.addlocation(new_location)\n new_unit.target = text\n self.inputstore.locationindex[new_location] = new_unit\n\n # We don't want the plural marker to be translated:\n del self.inputstore.locationindex[location]\n\n def convertline(self, line):\n returnline = u\"\"\n # handle multiline msgid if we're in one\n if self.inmultilinemsgid:\n msgid = quote.rstripeol(line).strip()\n # see if there's more\n self.inmultilinemsgid = (msgid[-1:] == '\\\\')\n # if we're echoing...\n if self.inecho:\n returnline = line\n # otherwise, this could be a comment\n elif line.strip()[:1] == '#':\n returnline = quote.rstripeol(line) + eol\n else:\n line = quote.rstripeol(line)\n delimiter_char, delimiter_pos = self.personality.find_delimiter(line)\n if quote.rstripeol(line)[-1:] == '\\\\':\n self.inmultilinemsgid = True\n if delimiter_pos == -1:\n key = self.personality.key_strip(line)\n delimiter = \" %s \" % self.personality.delimiters[0]\n else:\n key = self.personality.key_strip(line[:delimiter_pos])\n # Calculate space around the equal sign\n prespace = line[line.find(' ', len(key)):delimiter_pos]\n postspacestart = len(line[delimiter_pos+1:])\n postspaceend = len(line[delimiter_pos+1:].lstrip())\n postspace = line[delimiter_pos+1:delimiter_pos+(postspacestart-postspaceend)+1]\n delimiter = prespace + delimiter_char + postspace\n if key in self.inputstore.locationindex:\n unit = self.inputstore.locationindex[key]\n if not unit.istranslated() and bool(unit.source) and self.remove_untranslated:\n returnline = u\"\"\n self.inecho = False\n else:\n if unit.isfuzzy() and not self.includefuzzy or len(unit.target) == 0:\n value = unit.source\n else:\n value = self._handle_accesskeys(unit, key)\n self.inecho = False\n assert isinstance(value, six.text_type)\n returnline = \"%(key)s%(del)s%(value)s%(term)s%(eol)s\" % {\n \"key\": \"%s%s%s\" % (self.personality.key_wrap_char,\n key,\n self.personality.key_wrap_char),\n \"del\": delimiter,\n \"value\": \"%s%s%s\" % (self.personality.value_wrap_char,\n self.personality.encode(value),\n self.personality.value_wrap_char),\n \"term\": self.personality.pair_terminator,\n \"eol\": eol,\n }\n else:\n self.inecho = True\n returnline = line + eol\n assert isinstance(returnline, six.text_type)\n return returnline\n\n\ndef convertstrings(inputfile, outputfile, templatefile, personality=\"strings\",\n includefuzzy=False, encoding=None, outputthreshold=None,\n remove_untranslated=False):\n \"\"\".strings specific convertor function\"\"\"\n return convertprop(inputfile, outputfile, templatefile,\n personality=\"strings\", includefuzzy=includefuzzy,\n encoding=encoding, outputthreshold=outputthreshold,\n remove_untranslated=remove_untranslated)\n\n\ndef convertmozillaprop(inputfile, outputfile, templatefile,\n includefuzzy=False, remove_untranslated=False,\n outputthreshold=None):\n \"\"\"Mozilla specific convertor function\"\"\"\n return convertprop(inputfile, outputfile, templatefile,\n personality=\"mozilla\", includefuzzy=includefuzzy,\n remove_untranslated=remove_untranslated,\n outputthreshold=outputthreshold)\n\n\ndef convertprop(inputfile, outputfile, templatefile, personality=\"java\",\n includefuzzy=False, 
encoding=None, remove_untranslated=False,\n outputthreshold=None):\n inputstore = po.pofile(inputfile)\n\n if not convert.should_output_store(inputstore, outputthreshold):\n return False\n\n if templatefile is None:\n raise ValueError(\"must have template file for properties files\")\n # convertor = po2prop()\n else:\n convertor = reprop(templatefile, inputstore, personality, encoding,\n remove_untranslated)\n outputprop = convertor.convertstore(includefuzzy)\n outputfile.write(outputprop)\n return True\n\n\nformats = {\n (\"po\", \"properties\"): (\"properties\", convertprop),\n (\"po\", \"lang\"): (\"lang\", convertprop),\n (\"po\", \"strings\"): (\"strings\", convertstrings),\n}\n\n\ndef main(argv=None):\n # handle command line options\n parser = convert.ConvertOptionParser(formats, usetemplates=True,\n description=__doc__)\n parser.add_option(\n \"\", \"--personality\", dest=\"personality\",\n default=properties.default_dialect, type=\"choice\",\n choices=list(properties.dialects.keys()),\n help=\"override the input file format: %s (for .properties files, default: %s)\" % (\n \", \".join(six.iterkeys(properties.dialects)), properties.default_dialect),\n metavar=\"TYPE\")\n parser.add_option(\n \"\", \"--encoding\", dest=\"encoding\", default=None,\n help=\"override the encoding set by the personality\",\n metavar=\"ENCODING\")\n parser.add_option(\n \"\", \"--removeuntranslated\", dest=\"remove_untranslated\",\n default=False, action=\"store_true\",\n help=\"remove key value from output if it is untranslated\")\n parser.add_threshold_option()\n parser.add_fuzzy_option()\n parser.passthrough.append(\"personality\")\n parser.passthrough.append(\"encoding\")\n parser.passthrough.append(\"remove_untranslated\")\n parser.run(argv)\n\nif __name__ == '__main__':\n main()\n", "path": "translate/convert/po2prop.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2002-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Convert Gettext PO localization files to Java/Mozilla .properties files.\n\nSee: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/prop2po.html\nfor examples and usage instructions.\n\"\"\"\n\nimport six\nimport warnings\n\nfrom translate.convert import accesskey, convert\nfrom translate.misc import quote\nfrom translate.storage import po, properties\n\n\neol = u\"\\n\"\n\n\ndef applytranslation(key, propunit, inunit, mixedkeys):\n \"\"\"applies the translation for key in the po unit to the prop unit\"\"\"\n # this converts the po-style string to a prop-style string\n value = inunit.target\n # handle mixed keys\n for labelsuffix in properties.labelsuffixes:\n if key.endswith(labelsuffix):\n if key in mixedkeys:\n value, akey = accesskey.extract(value)\n break\n else:\n for akeysuffix in properties.accesskeysuffixes:\n if key.endswith(akeysuffix):\n if key in mixedkeys:\n label, value = accesskey.extract(value)\n if not value:\n warnings.warn(\"Could not find accesskey for %s\" % key)\n # Use the source language accesskey\n label, value = accesskey.extract(inunit.source)\n else:\n original = propunit.source\n # For the sake of diffs we keep the case of the\n # accesskey the same if we know the translation didn't\n # change. Casing matters in XUL.\n if value == propunit.source and original.lower() == value.lower():\n if original.isupper():\n value = value.upper()\n elif original.islower():\n value = value.lower()\n return value\n\n\nclass reprop(object):\n\n def __init__(self, templatefile, inputstore, personality, encoding=None,\n remove_untranslated=False):\n self.templatefile = templatefile\n self.inputstore = inputstore\n self.personality = properties.get_dialect(personality)\n self.encoding = encoding\n if self.encoding is None:\n self.encoding = self.personality.default_encoding\n self.remove_untranslated = remove_untranslated\n self.mixer = accesskey.UnitMixer(properties.labelsuffixes,\n properties.accesskeysuffixes)\n\n def convertstore(self, includefuzzy=False):\n self.includefuzzy = includefuzzy\n self.inmultilinemsgid = False\n self.inecho = False\n self.inputstore.makeindex()\n if self.personality.name == \"gaia\":\n self._explode_gaia_plurals()\n outputlines = []\n # Readlines doesn't work for UTF-16, we read() and splitlines(keepends) instead\n content = self.templatefile.read().decode(self.encoding)\n for line in content.splitlines(True):\n outputstr = self.convertline(line)\n outputlines.append(outputstr)\n return u\"\".join(outputlines).encode(self.encoding)\n\n def _handle_accesskeys(self, inunit, currkey):\n value = inunit.target\n if self.personality.name == \"mozilla\":\n keys = inunit.getlocations()\n mixedkeys = self.mixer.match_entities(keys)\n for key in keys:\n if key == currkey and key in self.inputstore.locationindex:\n propunit = self.inputstore.locationindex[key] # find the prop\n value = applytranslation(key, propunit, inunit, mixedkeys)\n break\n\n return value\n\n def _explode_gaia_plurals(self):\n \"\"\"Explode the gaia plurals.\"\"\"\n from translate.lang import data\n for unit in self.inputstore.units:\n if not unit.hasplural():\n continue\n if unit.isfuzzy() and not self.includefuzzy or not unit.istranslated():\n continue\n\n names = data.cldr_plural_categories\n location = unit.getlocations()[0]\n for category, text in 
zip(names, unit.target.strings):\n # TODO: for now we assume all forms are present. We need to\n # fill in the rest after mapping things to the proper CLDR names.\n if category == 'zero':\n # [zero] cases are translated as separate units\n continue\n new_unit = self.inputstore.addsourceunit(u\"fish\") # not used\n new_location = '%s[%s]' % (location, category)\n new_unit.addlocation(new_location)\n new_unit.target = text\n self.inputstore.locationindex[new_location] = new_unit\n\n # We don't want the plural marker to be translated:\n del self.inputstore.locationindex[location]\n\n def convertline(self, line):\n returnline = u\"\"\n # handle multiline msgid if we're in one\n if self.inmultilinemsgid:\n msgid = quote.rstripeol(line).strip()\n # see if there's more\n self.inmultilinemsgid = (msgid[-1:] == '\\\\')\n # if we're echoing...\n if self.inecho:\n returnline = line\n # otherwise, this could be a comment\n elif line.strip()[:1] == '#':\n returnline = quote.rstripeol(line) + eol\n else:\n line = quote.rstripeol(line)\n delimiter_char, delimiter_pos = self.personality.find_delimiter(line)\n if quote.rstripeol(line)[-1:] == '\\\\':\n self.inmultilinemsgid = True\n if delimiter_pos == -1:\n key = self.personality.key_strip(line)\n delimiter = \" %s \" % self.personality.delimiters[0]\n else:\n key = self.personality.key_strip(line[:delimiter_pos])\n # Calculate space around the equal sign\n prespace = line[line.find(' ', len(key)):delimiter_pos]\n postspacestart = len(line[delimiter_pos+1:])\n postspaceend = len(line[delimiter_pos+1:].lstrip())\n postspace = line[delimiter_pos+1:delimiter_pos+(postspacestart-postspaceend)+1]\n delimiter = prespace + delimiter_char + postspace\n if key in self.inputstore.locationindex:\n unit = self.inputstore.locationindex[key]\n if unit is None or not unit.istranslated() and bool(unit.source) and self.remove_untranslated:\n returnline = u\"\"\n self.inecho = False\n else:\n if unit.isfuzzy() and not self.includefuzzy or len(unit.target) == 0:\n value = unit.source\n else:\n value = self._handle_accesskeys(unit, key)\n self.inecho = False\n assert isinstance(value, six.text_type)\n returnline = \"%(key)s%(del)s%(value)s%(term)s%(eol)s\" % {\n \"key\": \"%s%s%s\" % (self.personality.key_wrap_char,\n key,\n self.personality.key_wrap_char),\n \"del\": delimiter,\n \"value\": \"%s%s%s\" % (self.personality.value_wrap_char,\n self.personality.encode(value),\n self.personality.value_wrap_char),\n \"term\": self.personality.pair_terminator,\n \"eol\": eol,\n }\n else:\n self.inecho = True\n returnline = line + eol\n assert isinstance(returnline, six.text_type)\n return returnline\n\n\ndef convertstrings(inputfile, outputfile, templatefile, personality=\"strings\",\n includefuzzy=False, encoding=None, outputthreshold=None,\n remove_untranslated=False):\n \"\"\".strings specific convertor function\"\"\"\n return convertprop(inputfile, outputfile, templatefile,\n personality=\"strings\", includefuzzy=includefuzzy,\n encoding=encoding, outputthreshold=outputthreshold,\n remove_untranslated=remove_untranslated)\n\n\ndef convertmozillaprop(inputfile, outputfile, templatefile,\n includefuzzy=False, remove_untranslated=False,\n outputthreshold=None):\n \"\"\"Mozilla specific convertor function\"\"\"\n return convertprop(inputfile, outputfile, templatefile,\n personality=\"mozilla\", includefuzzy=includefuzzy,\n remove_untranslated=remove_untranslated,\n outputthreshold=outputthreshold)\n\n\ndef convertprop(inputfile, outputfile, templatefile, personality=\"java\",\n 
includefuzzy=False, encoding=None, remove_untranslated=False,\n outputthreshold=None):\n inputstore = po.pofile(inputfile)\n\n if not convert.should_output_store(inputstore, outputthreshold):\n return False\n\n if templatefile is None:\n raise ValueError(\"must have template file for properties files\")\n # convertor = po2prop()\n else:\n convertor = reprop(templatefile, inputstore, personality, encoding,\n remove_untranslated)\n outputprop = convertor.convertstore(includefuzzy)\n outputfile.write(outputprop)\n return True\n\n\nformats = {\n (\"po\", \"properties\"): (\"properties\", convertprop),\n (\"po\", \"lang\"): (\"lang\", convertprop),\n (\"po\", \"strings\"): (\"strings\", convertstrings),\n}\n\n\ndef main(argv=None):\n # handle command line options\n parser = convert.ConvertOptionParser(formats, usetemplates=True,\n description=__doc__)\n parser.add_option(\n \"\", \"--personality\", dest=\"personality\",\n default=properties.default_dialect, type=\"choice\",\n choices=list(properties.dialects.keys()),\n help=\"override the input file format: %s (for .properties files, default: %s)\" % (\n \", \".join(six.iterkeys(properties.dialects)), properties.default_dialect),\n metavar=\"TYPE\")\n parser.add_option(\n \"\", \"--encoding\", dest=\"encoding\", default=None,\n help=\"override the encoding set by the personality\",\n metavar=\"ENCODING\")\n parser.add_option(\n \"\", \"--removeuntranslated\", dest=\"remove_untranslated\",\n default=False, action=\"store_true\",\n help=\"remove key value from output if it is untranslated\")\n parser.add_threshold_option()\n parser.add_fuzzy_option()\n parser.passthrough.append(\"personality\")\n parser.passthrough.append(\"encoding\")\n parser.passthrough.append(\"remove_untranslated\")\n parser.run(argv)\n\nif __name__ == '__main__':\n main()\n", "path": "translate/convert/po2prop.py"}]} | 3,849 | 157 |
gh_patches_debug_14660 | rasdani/github-patches | git_diff | lhotse-speech__lhotse-103 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A few questions...
I came across a few confusions while I was reading the code in order to write an example. It would be helpful for me if they are clarified ( I may have missed something).
https://github.com/lhotse-speech/lhotse/blob/master/lhotse/kaldi.py#L68
why `duration - start` rather than just `duration`?
https://github.com/lhotse-speech/lhotse/blob/master/lhotse/audio.py#L178
why not `[n_sources, n_channels, n_samples]`?
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lhotse/kaldi.py`
Content:
```
1 from collections import defaultdict
2 from pathlib import Path
3 from typing import Dict, Optional, Tuple
4
5 from lhotse.audio import AudioSource, Recording, RecordingSet
6 from lhotse.supervision import SupervisionSegment, SupervisionSet
7 from lhotse.utils import Pathlike
8
9
10 def load_kaldi_data_dir(path: Pathlike, sampling_rate: int) -> Tuple[RecordingSet, Optional[SupervisionSet]]:
11 """
12 Load a Kaldi data directory and convert it to a Lhotse RecordingSet and SupervisionSet manifests.
13 For this to work, at least the wav.scp file must exist.
14 SupervisionSet is created only when a segments file exists.
15 All the other files (text, utt2spk, etc.) are optional, and some of them might not be handled yet.
16 In particular, feats.scp files are ignored.
17 """
18 path = Path(path)
19 assert path.is_dir()
20
21 # must exist for RecordingSet
22 recordings = load_kaldi_text_mapping(path / 'wav.scp', must_exist=True)
23
24 durations = defaultdict(float)
25 reco2dur = path / 'reco2dur'
26 if not reco2dur.is_file():
27 raise ValueError(f"No such file: '{reco2dur}' -- fix it by running: utils/data/get_reco2dur.sh <data-dir>")
28 with reco2dur.open() as f:
29 for line in f:
30 recording_id, dur = line.strip().split()
31 durations[recording_id] = float(dur)
32
33 audio_set = RecordingSet.from_recordings(
34 Recording(
35 id=recording_id,
36 sources=[
37 AudioSource(
38 type='command' if path_or_cmd.endswith('|') else 'file',
39 channels=[0],
40 source=path_or_cmd[:-1] if path_or_cmd.endswith('|') else path_or_cmd
41 )
42 ],
43 sampling_rate=sampling_rate,
44 num_samples=int(durations[recording_id] * sampling_rate),
45 duration=durations[recording_id]
46 )
47 for recording_id, path_or_cmd in recordings.items()
48 )
49
50 # must exist for SupervisionSet
51 segments = path / 'segments'
52 if not segments.is_file():
53 return audio_set, None
54
55 with segments.open() as f:
56 supervision_segments = [l.strip().split() for l in f]
57
58 texts = load_kaldi_text_mapping(path / 'text')
59 speakers = load_kaldi_text_mapping(path / 'utt2spk')
60 genders = load_kaldi_text_mapping(path / 'spk2gender')
61 languages = load_kaldi_text_mapping(path / 'utt2lang')
62
63 supervision_set = SupervisionSet.from_segments(
64 SupervisionSegment(
65 id=segment_id,
66 recording_id=recording_id,
67 start=float(start),
68 duration=float(duration) - float(start),
69 channel=0,
70 text=texts[segment_id],
71 language=languages[segment_id],
72 speaker=speakers[segment_id],
73 gender=genders[speakers[segment_id]]
74 )
75 for segment_id, recording_id, start, duration in supervision_segments
76 )
77
78 return audio_set, supervision_set
79
80
81 def load_kaldi_text_mapping(path: Path, must_exist: bool = False) -> Dict[str, Optional[str]]:
82 """Load Kaldi files such as utt2spk, spk2gender, text, etc. as a dict."""
83 mapping = defaultdict(lambda: None)
84 if path.is_file():
85 with path.open() as f:
86 mapping = dict(line.strip().split(' ', maxsplit=1) for line in f)
87 elif must_exist:
88 raise ValueError(f"No such file: {path}")
89 return mapping
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lhotse/kaldi.py b/lhotse/kaldi.py
--- a/lhotse/kaldi.py
+++ b/lhotse/kaldi.py
@@ -65,14 +65,14 @@
id=segment_id,
recording_id=recording_id,
start=float(start),
- duration=float(duration) - float(start),
+ duration=float(end) - float(start),
channel=0,
text=texts[segment_id],
language=languages[segment_id],
speaker=speakers[segment_id],
gender=genders[speakers[segment_id]]
)
- for segment_id, recording_id, start, duration in supervision_segments
+ for segment_id, recording_id, start, end in supervision_segments
)
return audio_set, supervision_set
| {"golden_diff": "diff --git a/lhotse/kaldi.py b/lhotse/kaldi.py\n--- a/lhotse/kaldi.py\n+++ b/lhotse/kaldi.py\n@@ -65,14 +65,14 @@\n id=segment_id,\n recording_id=recording_id,\n start=float(start),\n- duration=float(duration) - float(start),\n+ duration=float(end) - float(start),\n channel=0,\n text=texts[segment_id],\n language=languages[segment_id],\n speaker=speakers[segment_id],\n gender=genders[speakers[segment_id]]\n )\n- for segment_id, recording_id, start, duration in supervision_segments\n+ for segment_id, recording_id, start, end in supervision_segments\n )\n \n return audio_set, supervision_set\n", "issue": "A few questions...\nI came across a few confusions while I was reading the code in order to write an example. It would be helpful for me if they are clarified ( I may have missed something).\r\n\r\nhttps://github.com/lhotse-speech/lhotse/blob/master/lhotse/kaldi.py#L68\r\nwhy `duration - start` rather than just `duration`?\r\n\r\nhttps://github.com/lhotse-speech/lhotse/blob/master/lhotse/audio.py#L178\r\nwhy not `[n_sources, n_channels, n_samples]`?\r\n\r\nThanks!\n", "before_files": [{"content": "from collections import defaultdict\nfrom pathlib import Path\nfrom typing import Dict, Optional, Tuple\n\nfrom lhotse.audio import AudioSource, Recording, RecordingSet\nfrom lhotse.supervision import SupervisionSegment, SupervisionSet\nfrom lhotse.utils import Pathlike\n\n\ndef load_kaldi_data_dir(path: Pathlike, sampling_rate: int) -> Tuple[RecordingSet, Optional[SupervisionSet]]:\n \"\"\"\n Load a Kaldi data directory and convert it to a Lhotse RecordingSet and SupervisionSet manifests.\n For this to work, at least the wav.scp file must exist.\n SupervisionSet is created only when a segments file exists.\n All the other files (text, utt2spk, etc.) 
are optional, and some of them might not be handled yet.\n In particular, feats.scp files are ignored.\n \"\"\"\n path = Path(path)\n assert path.is_dir()\n\n # must exist for RecordingSet\n recordings = load_kaldi_text_mapping(path / 'wav.scp', must_exist=True)\n\n durations = defaultdict(float)\n reco2dur = path / 'reco2dur'\n if not reco2dur.is_file():\n raise ValueError(f\"No such file: '{reco2dur}' -- fix it by running: utils/data/get_reco2dur.sh <data-dir>\")\n with reco2dur.open() as f:\n for line in f:\n recording_id, dur = line.strip().split()\n durations[recording_id] = float(dur)\n\n audio_set = RecordingSet.from_recordings(\n Recording(\n id=recording_id,\n sources=[\n AudioSource(\n type='command' if path_or_cmd.endswith('|') else 'file',\n channels=[0],\n source=path_or_cmd[:-1] if path_or_cmd.endswith('|') else path_or_cmd\n )\n ],\n sampling_rate=sampling_rate,\n num_samples=int(durations[recording_id] * sampling_rate),\n duration=durations[recording_id]\n )\n for recording_id, path_or_cmd in recordings.items()\n )\n\n # must exist for SupervisionSet\n segments = path / 'segments'\n if not segments.is_file():\n return audio_set, None\n\n with segments.open() as f:\n supervision_segments = [l.strip().split() for l in f]\n\n texts = load_kaldi_text_mapping(path / 'text')\n speakers = load_kaldi_text_mapping(path / 'utt2spk')\n genders = load_kaldi_text_mapping(path / 'spk2gender')\n languages = load_kaldi_text_mapping(path / 'utt2lang')\n\n supervision_set = SupervisionSet.from_segments(\n SupervisionSegment(\n id=segment_id,\n recording_id=recording_id,\n start=float(start),\n duration=float(duration) - float(start),\n channel=0,\n text=texts[segment_id],\n language=languages[segment_id],\n speaker=speakers[segment_id],\n gender=genders[speakers[segment_id]]\n )\n for segment_id, recording_id, start, duration in supervision_segments\n )\n\n return audio_set, supervision_set\n\n\ndef load_kaldi_text_mapping(path: Path, must_exist: bool = False) -> Dict[str, Optional[str]]:\n \"\"\"Load Kaldi files such as utt2spk, spk2gender, text, etc. as a dict.\"\"\"\n mapping = defaultdict(lambda: None)\n if path.is_file():\n with path.open() as f:\n mapping = dict(line.strip().split(' ', maxsplit=1) for line in f)\n elif must_exist:\n raise ValueError(f\"No such file: {path}\")\n return mapping\n", "path": "lhotse/kaldi.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom pathlib import Path\nfrom typing import Dict, Optional, Tuple\n\nfrom lhotse.audio import AudioSource, Recording, RecordingSet\nfrom lhotse.supervision import SupervisionSegment, SupervisionSet\nfrom lhotse.utils import Pathlike\n\n\ndef load_kaldi_data_dir(path: Pathlike, sampling_rate: int) -> Tuple[RecordingSet, Optional[SupervisionSet]]:\n \"\"\"\n Load a Kaldi data directory and convert it to a Lhotse RecordingSet and SupervisionSet manifests.\n For this to work, at least the wav.scp file must exist.\n SupervisionSet is created only when a segments file exists.\n All the other files (text, utt2spk, etc.) 
are optional, and some of them might not be handled yet.\n In particular, feats.scp files are ignored.\n \"\"\"\n path = Path(path)\n assert path.is_dir()\n\n # must exist for RecordingSet\n recordings = load_kaldi_text_mapping(path / 'wav.scp', must_exist=True)\n\n durations = defaultdict(float)\n reco2dur = path / 'reco2dur'\n if not reco2dur.is_file():\n raise ValueError(f\"No such file: '{reco2dur}' -- fix it by running: utils/data/get_reco2dur.sh <data-dir>\")\n with reco2dur.open() as f:\n for line in f:\n recording_id, dur = line.strip().split()\n durations[recording_id] = float(dur)\n\n audio_set = RecordingSet.from_recordings(\n Recording(\n id=recording_id,\n sources=[\n AudioSource(\n type='command' if path_or_cmd.endswith('|') else 'file',\n channels=[0],\n source=path_or_cmd[:-1] if path_or_cmd.endswith('|') else path_or_cmd\n )\n ],\n sampling_rate=sampling_rate,\n num_samples=int(durations[recording_id] * sampling_rate),\n duration=durations[recording_id]\n )\n for recording_id, path_or_cmd in recordings.items()\n )\n\n # must exist for SupervisionSet\n segments = path / 'segments'\n if not segments.is_file():\n return audio_set, None\n\n with segments.open() as f:\n supervision_segments = [l.strip().split() for l in f]\n\n texts = load_kaldi_text_mapping(path / 'text')\n speakers = load_kaldi_text_mapping(path / 'utt2spk')\n genders = load_kaldi_text_mapping(path / 'spk2gender')\n languages = load_kaldi_text_mapping(path / 'utt2lang')\n\n supervision_set = SupervisionSet.from_segments(\n SupervisionSegment(\n id=segment_id,\n recording_id=recording_id,\n start=float(start),\n duration=float(end) - float(start),\n channel=0,\n text=texts[segment_id],\n language=languages[segment_id],\n speaker=speakers[segment_id],\n gender=genders[speakers[segment_id]]\n )\n for segment_id, recording_id, start, end in supervision_segments\n )\n\n return audio_set, supervision_set\n\n\ndef load_kaldi_text_mapping(path: Path, must_exist: bool = False) -> Dict[str, Optional[str]]:\n \"\"\"Load Kaldi files such as utt2spk, spk2gender, text, etc. as a dict.\"\"\"\n mapping = defaultdict(lambda: None)\n if path.is_file():\n with path.open() as f:\n mapping = dict(line.strip().split(' ', maxsplit=1) for line in f)\n elif must_exist:\n raise ValueError(f\"No such file: {path}\")\n return mapping\n", "path": "lhotse/kaldi.py"}]} | 1,343 | 175 |
gh_patches_debug_3926 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-41 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problem with flask_jwt
I'm trying to use the ScoutApp in an flask application using the flask_jwt module, but i recive the below error message when my loggin route are called:
```jsx
[2018-05-30 13:06:20,984] ERROR in __init__: Traceback (most recent call last):
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask/app.py", line 1639, in full_dispatch_request
rv = self.dispatch_request()
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 69, in dispatch_request
result = view_func(**req.view_args)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/core/monkey.py", line 213, in __call__
return self._eop_wrapper_(self.__subject__, *args, **kwargs)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 119, in tracing_function
raise e
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 116, in tracing_function
return original(*args, **kwargs)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask_jwt/__init__.py", line 115, in _default_auth_request_handler
username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)
AttributeError: 'NoneType' object has no attribute 'get'
No user info
Request: OPTIONS /api/auth
QueryString: b''
IP: 127.0.0.1
Agent: linux | chrome 66.0.3359.181
Raw Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36
Payload: b''
ERROR:condominio:Traceback (most recent call last):
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask/app.py", line 1639, in full_dispatch_request
rv = self.dispatch_request()
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 69, in dispatch_request
result = view_func(**req.view_args)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/core/monkey.py", line 213, in __call__
return self._eop_wrapper_(self.__subject__, *args, **kwargs)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 119, in tracing_function
raise e
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 116, in tracing_function
return original(*args, **kwargs)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask_jwt/__init__.py", line 115, in _default_auth_request_handler
username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)
AttributeError: 'NoneType' object has no attribute 'get'
No user info
Request: OPTIONS /api/auth
QueryString: b''
IP: 127.0.0.1
Agent: linux | chrome 66.0.3359.181
Raw Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36
Payload: b''
INFO:werkzeug:127.0.0.1 - - [30/May/2018 13:06:23] "OPTIONS /api/auth HTTP/1.1" 500
```
Debugging the problem, apparently the JWT can't get the request data to do the authentication. For some reason the request lose it the data when ScoutApp are enabled.
The JWT code that breaks:
```python
def _default_auth_request_handler():
data = request.get_json()
username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)
password = data.get(current_app.config.get('JWT_AUTH_PASSWORD_KEY'), None)
criterion = [username, password, len(data) == 2]
if not all(criterion):
raise JWTError('Bad Request', 'Invalid credentials')
identity = _jwt.authentication_callback(username, password)
if identity:
access_token = _jwt.jwt_encode_callback(identity)
return _jwt.auth_response_callback(access_token, identity)
else:
raise JWTError('Bad Request', 'Invalid credentials')
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/flask/__init__.py`
Content:
```
1 from flask import current_app
2 from flask.globals import _request_ctx_stack
3
4 import scout_apm.core
5 from scout_apm.core.config.config import ScoutConfig
6 from scout_apm.core.tracked_request import TrackedRequest
7 from scout_apm.core.monkey import CallableProxy
8
9
10 class ScoutApm(object):
11 def __init__(self, app=None):
12 self.app = app
13 if app is not None:
14 self.init_app(app)
15
16 def init_app(self, app):
17 app.before_first_request(self.before_first_request)
18 app.before_request(self.process_request)
19 app.after_request(self.process_response)
20
21 # Monkey-patch the Flask.dispatch_request method
22 app.dispatch_request = self.dispatch_request
23
24 #############
25 # Startup #
26 #############
27
28 def before_first_request(self):
29 self.extract_flask_settings()
30 scout_apm.core.install()
31
32 def extract_flask_settings(self):
33 """
34 Copies SCOUT_* settings in the app into Scout's config lookup
35 """
36 configs = {}
37 for name in filter(lambda x: x.startswith('SCOUT_'), current_app.config):
38 value = current_app.config[name]
39 clean_name = name.replace('SCOUT_', '').lower()
40 configs[clean_name] = value
41 ScoutConfig.set(**configs)
42
43 #############################
44 # Request Lifecycle hooks #
45 #############################
46
47 def dispatch_request(self):
48 """Modified version of Flask.dispatch_request to call process_view."""
49
50 req = _request_ctx_stack.top.request
51 app = current_app
52
53 if req.routing_exception is not None:
54 app.raise_routing_exception(req)
55
56 # The routing rule has some handy attributes to extract how Flask found
57 # this endpoint
58 rule = req.url_rule
59
60 # Wrap the real view_func
61 view_func = self.wrap_view_func(
62 app,
63 rule,
64 req,
65 app.view_functions[rule.endpoint],
66 req.view_args)
67
68 return view_func(**req.view_args)
69
70 def process_request(self):
71 TrackedRequest.instance().start_span(operation='Flask')
72
73 def wrap_view_func(self, app, rule, req, view_func, view_kwargs):
74 """ This method is called just before the flask view is called.
75 This is done by the dispatch_request method.
76 """
77 operation = view_func.__module__ + '.' + view_func.__name__
78 return self.trace_view_function(
79 view_func,
80 ('Controller', {'path': req.path, 'name': operation}))
81
82 def trace_view_function(self, func, info):
83 try:
84 def tracing_function(original, *args, **kwargs):
85 entry_type, detail = info
86
87 operation = entry_type
88 if detail['name'] is not None:
89 operation = operation + '/' + detail['name']
90
91 tr = TrackedRequest.instance()
92 tr.mark_real_request()
93 span = tr.start_span(operation=operation)
94
95 for key in detail:
96 span.tag(key, detail[key])
97
98 # And the custom View stuff
99 # request = args[0]
100
101 # Extract headers
102 # regex = re.compile('^HTTP_')
103 # headers = dict((regex.sub('', header), value) for (header, value)
104 # in request.META.items() if header.startswith('HTTP_'))
105
106 # span.tag('remote_addr', request.META['REMOTE_ADDR'])
107
108 try:
109 return original(*args, **kwargs)
110 except Exception as e:
111 TrackedRequest.instance().tag('error', 'true')
112 raise e
113 finally:
114 TrackedRequest.instance().stop_span()
115
116 return CallableProxy(func, tracing_function)
117 except Exception as err:
118 # If we can't wrap for any reason, just return the original
119 return func
120
121 def process_response(self, response):
122 TrackedRequest.instance().stop_span()
123 return response
124
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/flask/__init__.py b/src/scout_apm/flask/__init__.py
--- a/src/scout_apm/flask/__init__.py
+++ b/src/scout_apm/flask/__init__.py
@@ -50,6 +50,10 @@
req = _request_ctx_stack.top.request
app = current_app
+ # Return flask's default options response. See issue #40
+ if req.method == 'OPTIONS':
+ return app.make_default_options_response()
+
if req.routing_exception is not None:
app.raise_routing_exception(req)
| {"golden_diff": "diff --git a/src/scout_apm/flask/__init__.py b/src/scout_apm/flask/__init__.py\n--- a/src/scout_apm/flask/__init__.py\n+++ b/src/scout_apm/flask/__init__.py\n@@ -50,6 +50,10 @@\n req = _request_ctx_stack.top.request\n app = current_app\n \n+ # Return flask's default options response. See issue #40\n+ if req.method == 'OPTIONS':\n+ return app.make_default_options_response()\n+\n if req.routing_exception is not None:\n app.raise_routing_exception(req)\n", "issue": "Problem with flask_jwt\nI'm trying to use the ScoutApp in an flask application using the flask_jwt module, but i recive the below error message when my loggin route are called:\r\n\r\n```jsx\r\n[2018-05-30 13:06:20,984] ERROR in __init__: Traceback (most recent call last):\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask/app.py\", line 1639, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 69, in dispatch_request\r\n result = view_func(**req.view_args)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/core/monkey.py\", line 213, in __call__\r\n return self._eop_wrapper_(self.__subject__, *args, **kwargs)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 119, in tracing_function\r\n raise e\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 116, in tracing_function\r\n return original(*args, **kwargs)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask_jwt/__init__.py\", line 115, in _default_auth_request_handler\r\n username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)\r\n\r\nAttributeError: 'NoneType' object has no attribute 'get'\r\n No user info\r\n Request: OPTIONS /api/auth\r\n QueryString: b''\r\n IP: 127.0.0.1\r\n Agent: linux | chrome 66.0.3359.181\r\n Raw Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36\r\n Payload: b''\r\n \r\nERROR:condominio:Traceback (most recent call last):\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask/app.py\", line 1639, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 69, in dispatch_request\r\n result = view_func(**req.view_args)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/core/monkey.py\", line 213, in __call__\r\n return self._eop_wrapper_(self.__subject__, *args, **kwargs)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 119, in tracing_function\r\n raise e\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 116, in tracing_function\r\n return original(*args, **kwargs)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask_jwt/__init__.py\", line 115, in _default_auth_request_handler\r\n username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)\r\n\r\nAttributeError: 'NoneType' object has no attribute 'get'\r\n No user info\r\n Request: OPTIONS /api/auth\r\n QueryString: b''\r\n IP: 127.0.0.1\r\n Agent: linux | chrome 66.0.3359.181\r\n Raw Agent: Mozilla/5.0 (X11; Linux 
x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36\r\n Payload: b''\r\n \r\nINFO:werkzeug:127.0.0.1 - - [30/May/2018 13:06:23] \"OPTIONS /api/auth HTTP/1.1\" 500 \r\n```\r\n\r\nDebugging the problem, apparently the JWT can't get the request data to do the authentication. For some reason the request lose it the data when ScoutApp are enabled.\r\n\r\nThe JWT code that breaks:\r\n```python\r\ndef _default_auth_request_handler():\r\n data = request.get_json()\r\n username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)\r\n password = data.get(current_app.config.get('JWT_AUTH_PASSWORD_KEY'), None)\r\n criterion = [username, password, len(data) == 2]\r\n\r\n if not all(criterion):\r\n raise JWTError('Bad Request', 'Invalid credentials')\r\n\r\n identity = _jwt.authentication_callback(username, password)\r\n\r\n if identity:\r\n access_token = _jwt.jwt_encode_callback(identity)\r\n return _jwt.auth_response_callback(access_token, identity)\r\n else:\r\n raise JWTError('Bad Request', 'Invalid credentials')\r\n```\r\n\n", "before_files": [{"content": "from flask import current_app\nfrom flask.globals import _request_ctx_stack\n\nimport scout_apm.core\nfrom scout_apm.core.config.config import ScoutConfig\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.monkey import CallableProxy\n\n\nclass ScoutApm(object):\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n app.before_first_request(self.before_first_request)\n app.before_request(self.process_request)\n app.after_request(self.process_response)\n\n # Monkey-patch the Flask.dispatch_request method\n app.dispatch_request = self.dispatch_request\n\n #############\n # Startup #\n #############\n\n def before_first_request(self):\n self.extract_flask_settings()\n scout_apm.core.install()\n\n def extract_flask_settings(self):\n \"\"\"\n Copies SCOUT_* settings in the app into Scout's config lookup\n \"\"\"\n configs = {}\n for name in filter(lambda x: x.startswith('SCOUT_'), current_app.config):\n value = current_app.config[name]\n clean_name = name.replace('SCOUT_', '').lower()\n configs[clean_name] = value\n ScoutConfig.set(**configs)\n\n #############################\n # Request Lifecycle hooks #\n #############################\n\n def dispatch_request(self):\n \"\"\"Modified version of Flask.dispatch_request to call process_view.\"\"\"\n\n req = _request_ctx_stack.top.request\n app = current_app\n\n if req.routing_exception is not None:\n app.raise_routing_exception(req)\n\n # The routing rule has some handy attributes to extract how Flask found\n # this endpoint\n rule = req.url_rule\n\n # Wrap the real view_func\n view_func = self.wrap_view_func(\n app,\n rule,\n req,\n app.view_functions[rule.endpoint],\n req.view_args)\n\n return view_func(**req.view_args)\n\n def process_request(self):\n TrackedRequest.instance().start_span(operation='Flask')\n\n def wrap_view_func(self, app, rule, req, view_func, view_kwargs):\n \"\"\" This method is called just before the flask view is called.\n This is done by the dispatch_request method.\n \"\"\"\n operation = view_func.__module__ + '.' 
+ view_func.__name__\n return self.trace_view_function(\n view_func,\n ('Controller', {'path': req.path, 'name': operation}))\n\n def trace_view_function(self, func, info):\n try:\n def tracing_function(original, *args, **kwargs):\n entry_type, detail = info\n\n operation = entry_type\n if detail['name'] is not None:\n operation = operation + '/' + detail['name']\n\n tr = TrackedRequest.instance()\n tr.mark_real_request()\n span = tr.start_span(operation=operation)\n\n for key in detail:\n span.tag(key, detail[key])\n\n # And the custom View stuff\n # request = args[0]\n\n # Extract headers\n # regex = re.compile('^HTTP_')\n # headers = dict((regex.sub('', header), value) for (header, value)\n # in request.META.items() if header.startswith('HTTP_'))\n\n # span.tag('remote_addr', request.META['REMOTE_ADDR'])\n\n try:\n return original(*args, **kwargs)\n except Exception as e:\n TrackedRequest.instance().tag('error', 'true')\n raise e\n finally:\n TrackedRequest.instance().stop_span()\n\n return CallableProxy(func, tracing_function)\n except Exception as err:\n # If we can't wrap for any reason, just return the original\n return func\n\n def process_response(self, response):\n TrackedRequest.instance().stop_span()\n return response\n\n", "path": "src/scout_apm/flask/__init__.py"}], "after_files": [{"content": "from flask import current_app\nfrom flask.globals import _request_ctx_stack\n\nimport scout_apm.core\nfrom scout_apm.core.config.config import ScoutConfig\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.monkey import CallableProxy\n\n\nclass ScoutApm(object):\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n app.before_first_request(self.before_first_request)\n app.before_request(self.process_request)\n app.after_request(self.process_response)\n\n # Monkey-patch the Flask.dispatch_request method\n app.dispatch_request = self.dispatch_request\n\n #############\n # Startup #\n #############\n\n def before_first_request(self):\n self.extract_flask_settings()\n scout_apm.core.install()\n\n def extract_flask_settings(self):\n \"\"\"\n Copies SCOUT_* settings in the app into Scout's config lookup\n \"\"\"\n configs = {}\n for name in filter(lambda x: x.startswith('SCOUT_'), current_app.config):\n value = current_app.config[name]\n clean_name = name.replace('SCOUT_', '').lower()\n configs[clean_name] = value\n ScoutConfig.set(**configs)\n\n #############################\n # Request Lifecycle hooks #\n #############################\n\n def dispatch_request(self):\n \"\"\"Modified version of Flask.dispatch_request to call process_view.\"\"\"\n\n req = _request_ctx_stack.top.request\n app = current_app\n\n # Return flask's default options response. 
See issue #40\n if req.method == 'OPTIONS':\n return app.make_default_options_response()\n\n if req.routing_exception is not None:\n app.raise_routing_exception(req)\n\n # The routing rule has some handy attributes to extract how Flask found\n # this endpoint\n rule = req.url_rule\n\n # Wrap the real view_func\n view_func = self.wrap_view_func(\n app,\n rule,\n req,\n app.view_functions[rule.endpoint],\n req.view_args)\n\n return view_func(**req.view_args)\n\n def process_request(self):\n TrackedRequest.instance().start_span(operation='Flask')\n\n def wrap_view_func(self, app, rule, req, view_func, view_kwargs):\n \"\"\" This method is called just before the flask view is called.\n This is done by the dispatch_request method.\n \"\"\"\n operation = view_func.__module__ + '.' + view_func.__name__\n return self.trace_view_function(\n view_func,\n ('Controller', {'path': req.path, 'name': operation}))\n\n def trace_view_function(self, func, info):\n try:\n def tracing_function(original, *args, **kwargs):\n entry_type, detail = info\n\n operation = entry_type\n if detail['name'] is not None:\n operation = operation + '/' + detail['name']\n\n tr = TrackedRequest.instance()\n tr.mark_real_request()\n span = tr.start_span(operation=operation)\n\n for key in detail:\n span.tag(key, detail[key])\n\n # And the custom View stuff\n # request = args[0]\n\n # Extract headers\n # regex = re.compile('^HTTP_')\n # headers = dict((regex.sub('', header), value) for (header, value)\n # in request.META.items() if header.startswith('HTTP_'))\n\n # span.tag('remote_addr', request.META['REMOTE_ADDR'])\n\n try:\n return original(*args, **kwargs)\n except Exception as e:\n TrackedRequest.instance().tag('error', 'true')\n raise e\n finally:\n TrackedRequest.instance().stop_span()\n\n return CallableProxy(func, tracing_function)\n except Exception as err:\n # If we can't wrap for any reason, just return the original\n return func\n\n def process_response(self, response):\n TrackedRequest.instance().stop_span()\n return response\n\n", "path": "src/scout_apm/flask/__init__.py"}]} | 2,574 | 136 |
gh_patches_debug_8194 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-6402 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve regex for semver in automation rules
We have a very basic pattern, but on the semver faq they have a better one
https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
This needs to be changed and with a comment to the link from where it came from.
https://github.com/readthedocs/readthedocs.org/blob/ff0ad67991e599f79e9d956a3733785ccb78c505/readthedocs/builds/constants.py#L113-L113
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/builds/constants.py`
Content:
```
1 """Constants for the builds app."""
2
3 from django.conf import settings
4 from django.utils.translation import ugettext_lazy as _
5
6
7 BUILD_STATE_TRIGGERED = 'triggered'
8 BUILD_STATE_CLONING = 'cloning'
9 BUILD_STATE_INSTALLING = 'installing'
10 BUILD_STATE_BUILDING = 'building'
11 BUILD_STATE_FINISHED = 'finished'
12
13 BUILD_STATE = (
14 (BUILD_STATE_TRIGGERED, _('Triggered')),
15 (BUILD_STATE_CLONING, _('Cloning')),
16 (BUILD_STATE_INSTALLING, _('Installing')),
17 (BUILD_STATE_BUILDING, _('Building')),
18 (BUILD_STATE_FINISHED, _('Finished')),
19 )
20
21 BUILD_TYPES = (
22 ('html', _('HTML')),
23 ('pdf', _('PDF')),
24 ('epub', _('Epub')),
25 # There is currently no support for building man/dash formats, but we keep
26 # it there since the DB might still contain those values for legacy
27 # projects.
28 ('man', _('Manpage')),
29 ('dash', _('Dash')),
30 )
31
32 # Manager name for Internal Versions or Builds.
33 # ie: Versions and Builds Excluding pull request/merge request Versions and Builds.
34 INTERNAL = 'internal'
35 # Manager name for External Versions or Builds.
36 # ie: Only pull request/merge request Versions and Builds.
37 EXTERNAL = 'external'
38 EXTERNAL_TEXT = _('External')
39
40 BRANCH = 'branch'
41 BRANCH_TEXT = _('Branch')
42 TAG = 'tag'
43 TAG_TEXT = _('Tag')
44 UNKNOWN = 'unknown'
45 UNKNOWN_TEXT = _('Unknown')
46
47 VERSION_TYPES = (
48 (BRANCH, BRANCH_TEXT),
49 (TAG, TAG_TEXT),
50 (EXTERNAL, EXTERNAL_TEXT),
51 (UNKNOWN, UNKNOWN_TEXT),
52 )
53
54 LATEST = settings.RTD_LATEST
55 LATEST_VERBOSE_NAME = settings.RTD_LATEST_VERBOSE_NAME
56
57 STABLE = settings.RTD_STABLE
58 STABLE_VERBOSE_NAME = settings.RTD_STABLE_VERBOSE_NAME
59
60 # Those names are specialcased version names. They do not correspond to
61 # branches/tags in a project's repository.
62 NON_REPOSITORY_VERSIONS = (
63 LATEST,
64 STABLE,
65 )
66
67 # General Build Statuses
68 BUILD_STATUS_FAILURE = 'failed'
69 BUILD_STATUS_PENDING = 'pending'
70 BUILD_STATUS_SUCCESS = 'success'
71
72 # GitHub Build Statuses
73 GITHUB_BUILD_STATUS_FAILURE = 'failure'
74 GITHUB_BUILD_STATUS_PENDING = 'pending'
75 GITHUB_BUILD_STATUS_SUCCESS = 'success'
76
77 # GitLab Build Statuses
78 GITLAB_BUILD_STATUS_FAILURE = 'failed'
79 GITLAB_BUILD_STATUS_PENDING = 'pending'
80 GITLAB_BUILD_STATUS_SUCCESS = 'success'
81
82 # Used to select correct Build status and description to be sent to each service API
83 SELECT_BUILD_STATUS = {
84 BUILD_STATUS_FAILURE: {
85 'github': GITHUB_BUILD_STATUS_FAILURE,
86 'gitlab': GITLAB_BUILD_STATUS_FAILURE,
87 'description': 'Read the Docs build failed!',
88 },
89 BUILD_STATUS_PENDING: {
90 'github': GITHUB_BUILD_STATUS_PENDING,
91 'gitlab': GITLAB_BUILD_STATUS_PENDING,
92 'description': 'Read the Docs build is in progress!',
93 },
94 BUILD_STATUS_SUCCESS: {
95 'github': GITHUB_BUILD_STATUS_SUCCESS,
96 'gitlab': GITLAB_BUILD_STATUS_SUCCESS,
97 'description': 'Read the Docs build succeeded!',
98 },
99 }
100
101 RTD_BUILD_STATUS_API_NAME = 'continuous-documentation/read-the-docs'
102
103 GITHUB_EXTERNAL_VERSION_NAME = 'Pull Request'
104 GITLAB_EXTERNAL_VERSION_NAME = 'Merge Request'
105 GENERIC_EXTERNAL_VERSION_NAME = 'External Version'
106
107
108 # Automation rules
109
110 ALL_VERSIONS = 'all-versions'
111 ALL_VERSIONS_REGEX = r'.*'
112 SEMVER_VERSIONS = 'semver-versions'
113 SEMVER_VERSIONS_REGEX = r'^v?(\d+\.)(\d+\.)(\d+)(-.+)?$'
114
115
116 PREDEFINED_MATCH_ARGS = (
117 (ALL_VERSIONS, _('Any version')),
118 (SEMVER_VERSIONS, _('SemVer versions')),
119 (None, _('Custom match')),
120 )
121
122 PREDEFINED_MATCH_ARGS_VALUES = {
123 ALL_VERSIONS: ALL_VERSIONS_REGEX,
124 SEMVER_VERSIONS: SEMVER_VERSIONS_REGEX,
125 }
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/builds/constants.py b/readthedocs/builds/constants.py
--- a/readthedocs/builds/constants.py
+++ b/readthedocs/builds/constants.py
@@ -110,7 +110,10 @@
ALL_VERSIONS = 'all-versions'
ALL_VERSIONS_REGEX = r'.*'
SEMVER_VERSIONS = 'semver-versions'
-SEMVER_VERSIONS_REGEX = r'^v?(\d+\.)(\d+\.)(\d+)(-.+)?$'
+
+# Pattern referred from
+# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
+SEMVER_VERSIONS_REGEX = r'^v?(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$' # noqa
PREDEFINED_MATCH_ARGS = (
| {"golden_diff": "diff --git a/readthedocs/builds/constants.py b/readthedocs/builds/constants.py\n--- a/readthedocs/builds/constants.py\n+++ b/readthedocs/builds/constants.py\n@@ -110,7 +110,10 @@\n ALL_VERSIONS = 'all-versions'\n ALL_VERSIONS_REGEX = r'.*'\n SEMVER_VERSIONS = 'semver-versions'\n-SEMVER_VERSIONS_REGEX = r'^v?(\\d+\\.)(\\d+\\.)(\\d+)(-.+)?$'\n+\n+# Pattern referred from\n+# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string\n+SEMVER_VERSIONS_REGEX = r'^v?(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$' # noqa\n \n \n PREDEFINED_MATCH_ARGS = (\n", "issue": "Improve regex for semver in automation rules\nWe have a very basic pattern, but on the semver faq they have a better one\r\n\r\nhttps://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string\r\n\r\nThis needs to be changed and with a comment to the link from where it came from.\r\n\r\nhttps://github.com/readthedocs/readthedocs.org/blob/ff0ad67991e599f79e9d956a3733785ccb78c505/readthedocs/builds/constants.py#L113-L113\n", "before_files": [{"content": "\"\"\"Constants for the builds app.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\n\nBUILD_STATE_TRIGGERED = 'triggered'\nBUILD_STATE_CLONING = 'cloning'\nBUILD_STATE_INSTALLING = 'installing'\nBUILD_STATE_BUILDING = 'building'\nBUILD_STATE_FINISHED = 'finished'\n\nBUILD_STATE = (\n (BUILD_STATE_TRIGGERED, _('Triggered')),\n (BUILD_STATE_CLONING, _('Cloning')),\n (BUILD_STATE_INSTALLING, _('Installing')),\n (BUILD_STATE_BUILDING, _('Building')),\n (BUILD_STATE_FINISHED, _('Finished')),\n)\n\nBUILD_TYPES = (\n ('html', _('HTML')),\n ('pdf', _('PDF')),\n ('epub', _('Epub')),\n # There is currently no support for building man/dash formats, but we keep\n # it there since the DB might still contain those values for legacy\n # projects.\n ('man', _('Manpage')),\n ('dash', _('Dash')),\n)\n\n# Manager name for Internal Versions or Builds.\n# ie: Versions and Builds Excluding pull request/merge request Versions and Builds.\nINTERNAL = 'internal'\n# Manager name for External Versions or Builds.\n# ie: Only pull request/merge request Versions and Builds.\nEXTERNAL = 'external'\nEXTERNAL_TEXT = _('External')\n\nBRANCH = 'branch'\nBRANCH_TEXT = _('Branch')\nTAG = 'tag'\nTAG_TEXT = _('Tag')\nUNKNOWN = 'unknown'\nUNKNOWN_TEXT = _('Unknown')\n\nVERSION_TYPES = (\n (BRANCH, BRANCH_TEXT),\n (TAG, TAG_TEXT),\n (EXTERNAL, EXTERNAL_TEXT),\n (UNKNOWN, UNKNOWN_TEXT),\n)\n\nLATEST = settings.RTD_LATEST\nLATEST_VERBOSE_NAME = settings.RTD_LATEST_VERBOSE_NAME\n\nSTABLE = settings.RTD_STABLE\nSTABLE_VERBOSE_NAME = settings.RTD_STABLE_VERBOSE_NAME\n\n# Those names are specialcased version names. 
They do not correspond to\n# branches/tags in a project's repository.\nNON_REPOSITORY_VERSIONS = (\n LATEST,\n STABLE,\n)\n\n# General Build Statuses\nBUILD_STATUS_FAILURE = 'failed'\nBUILD_STATUS_PENDING = 'pending'\nBUILD_STATUS_SUCCESS = 'success'\n\n# GitHub Build Statuses\nGITHUB_BUILD_STATUS_FAILURE = 'failure'\nGITHUB_BUILD_STATUS_PENDING = 'pending'\nGITHUB_BUILD_STATUS_SUCCESS = 'success'\n\n# GitLab Build Statuses\nGITLAB_BUILD_STATUS_FAILURE = 'failed'\nGITLAB_BUILD_STATUS_PENDING = 'pending'\nGITLAB_BUILD_STATUS_SUCCESS = 'success'\n\n# Used to select correct Build status and description to be sent to each service API\nSELECT_BUILD_STATUS = {\n BUILD_STATUS_FAILURE: {\n 'github': GITHUB_BUILD_STATUS_FAILURE,\n 'gitlab': GITLAB_BUILD_STATUS_FAILURE,\n 'description': 'Read the Docs build failed!',\n },\n BUILD_STATUS_PENDING: {\n 'github': GITHUB_BUILD_STATUS_PENDING,\n 'gitlab': GITLAB_BUILD_STATUS_PENDING,\n 'description': 'Read the Docs build is in progress!',\n },\n BUILD_STATUS_SUCCESS: {\n 'github': GITHUB_BUILD_STATUS_SUCCESS,\n 'gitlab': GITLAB_BUILD_STATUS_SUCCESS,\n 'description': 'Read the Docs build succeeded!',\n },\n}\n\nRTD_BUILD_STATUS_API_NAME = 'continuous-documentation/read-the-docs'\n\nGITHUB_EXTERNAL_VERSION_NAME = 'Pull Request'\nGITLAB_EXTERNAL_VERSION_NAME = 'Merge Request'\nGENERIC_EXTERNAL_VERSION_NAME = 'External Version'\n\n\n# Automation rules\n\nALL_VERSIONS = 'all-versions'\nALL_VERSIONS_REGEX = r'.*'\nSEMVER_VERSIONS = 'semver-versions'\nSEMVER_VERSIONS_REGEX = r'^v?(\\d+\\.)(\\d+\\.)(\\d+)(-.+)?$'\n\n\nPREDEFINED_MATCH_ARGS = (\n (ALL_VERSIONS, _('Any version')),\n (SEMVER_VERSIONS, _('SemVer versions')),\n (None, _('Custom match')),\n)\n\nPREDEFINED_MATCH_ARGS_VALUES = {\n ALL_VERSIONS: ALL_VERSIONS_REGEX,\n SEMVER_VERSIONS: SEMVER_VERSIONS_REGEX,\n}\n", "path": "readthedocs/builds/constants.py"}], "after_files": [{"content": "\"\"\"Constants for the builds app.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\n\nBUILD_STATE_TRIGGERED = 'triggered'\nBUILD_STATE_CLONING = 'cloning'\nBUILD_STATE_INSTALLING = 'installing'\nBUILD_STATE_BUILDING = 'building'\nBUILD_STATE_FINISHED = 'finished'\n\nBUILD_STATE = (\n (BUILD_STATE_TRIGGERED, _('Triggered')),\n (BUILD_STATE_CLONING, _('Cloning')),\n (BUILD_STATE_INSTALLING, _('Installing')),\n (BUILD_STATE_BUILDING, _('Building')),\n (BUILD_STATE_FINISHED, _('Finished')),\n)\n\nBUILD_TYPES = (\n ('html', _('HTML')),\n ('pdf', _('PDF')),\n ('epub', _('Epub')),\n # There is currently no support for building man/dash formats, but we keep\n # it there since the DB might still contain those values for legacy\n # projects.\n ('man', _('Manpage')),\n ('dash', _('Dash')),\n)\n\n# Manager name for Internal Versions or Builds.\n# ie: Versions and Builds Excluding pull request/merge request Versions and Builds.\nINTERNAL = 'internal'\n# Manager name for External Versions or Builds.\n# ie: Only pull request/merge request Versions and Builds.\nEXTERNAL = 'external'\nEXTERNAL_TEXT = _('External')\n\nBRANCH = 'branch'\nBRANCH_TEXT = _('Branch')\nTAG = 'tag'\nTAG_TEXT = _('Tag')\nUNKNOWN = 'unknown'\nUNKNOWN_TEXT = _('Unknown')\n\nVERSION_TYPES = (\n (BRANCH, BRANCH_TEXT),\n (TAG, TAG_TEXT),\n (EXTERNAL, EXTERNAL_TEXT),\n (UNKNOWN, UNKNOWN_TEXT),\n)\n\nLATEST = settings.RTD_LATEST\nLATEST_VERBOSE_NAME = settings.RTD_LATEST_VERBOSE_NAME\n\nSTABLE = settings.RTD_STABLE\nSTABLE_VERBOSE_NAME = settings.RTD_STABLE_VERBOSE_NAME\n\n# Those names are specialcased version names. 
They do not correspond to\n# branches/tags in a project's repository.\nNON_REPOSITORY_VERSIONS = (\n LATEST,\n STABLE,\n)\n\n# General Build Statuses\nBUILD_STATUS_FAILURE = 'failed'\nBUILD_STATUS_PENDING = 'pending'\nBUILD_STATUS_SUCCESS = 'success'\n\n# GitHub Build Statuses\nGITHUB_BUILD_STATUS_FAILURE = 'failure'\nGITHUB_BUILD_STATUS_PENDING = 'pending'\nGITHUB_BUILD_STATUS_SUCCESS = 'success'\n\n# GitLab Build Statuses\nGITLAB_BUILD_STATUS_FAILURE = 'failed'\nGITLAB_BUILD_STATUS_PENDING = 'pending'\nGITLAB_BUILD_STATUS_SUCCESS = 'success'\n\n# Used to select correct Build status and description to be sent to each service API\nSELECT_BUILD_STATUS = {\n BUILD_STATUS_FAILURE: {\n 'github': GITHUB_BUILD_STATUS_FAILURE,\n 'gitlab': GITLAB_BUILD_STATUS_FAILURE,\n 'description': 'Read the Docs build failed!',\n },\n BUILD_STATUS_PENDING: {\n 'github': GITHUB_BUILD_STATUS_PENDING,\n 'gitlab': GITLAB_BUILD_STATUS_PENDING,\n 'description': 'Read the Docs build is in progress!',\n },\n BUILD_STATUS_SUCCESS: {\n 'github': GITHUB_BUILD_STATUS_SUCCESS,\n 'gitlab': GITLAB_BUILD_STATUS_SUCCESS,\n 'description': 'Read the Docs build succeeded!',\n },\n}\n\nRTD_BUILD_STATUS_API_NAME = 'continuous-documentation/read-the-docs'\n\nGITHUB_EXTERNAL_VERSION_NAME = 'Pull Request'\nGITLAB_EXTERNAL_VERSION_NAME = 'Merge Request'\nGENERIC_EXTERNAL_VERSION_NAME = 'External Version'\n\n\n# Automation rules\n\nALL_VERSIONS = 'all-versions'\nALL_VERSIONS_REGEX = r'.*'\nSEMVER_VERSIONS = 'semver-versions'\n\n# Pattern referred from\n# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string\nSEMVER_VERSIONS_REGEX = r'^v?(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$' # noqa\n\n\nPREDEFINED_MATCH_ARGS = (\n (ALL_VERSIONS, _('Any version')),\n (SEMVER_VERSIONS, _('SemVer versions')),\n (None, _('Custom match')),\n)\n\nPREDEFINED_MATCH_ARGS_VALUES = {\n ALL_VERSIONS: ALL_VERSIONS_REGEX,\n SEMVER_VERSIONS: SEMVER_VERSIONS_REGEX,\n}\n", "path": "readthedocs/builds/constants.py"}]} | 1,530 | 281 |
gh_patches_debug_56402 | rasdani/github-patches | git_diff | encode__httpx-361 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing py.typed declaration?
`mypy` is complaining about not being able to find type annotations for `httpx`:
`error: Cannot find module named 'httpx'`
I'm somewhat new to using type annotations/static type checking in Python, but from the mypy documentation [here](https://mypy.readthedocs.io/en/latest/installed_packages.html#making-pep-561-compatible-packages) it looks like there may be a missing declaration in `setup.py`?
--- END ISSUE ---
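For background on the PEP 561 mechanism referenced in the issue, a minimal packaging sketch looks like the following. The package name and layout here are hypothetical and this only illustrates the general mechanism, not necessarily the project's actual fix: the marker file must ship inside the package, and the distribution must be installed uncompressed for type checkers to find it.

```python
# Illustrative PEP 561 sketch (hypothetical package name, not httpx's setup).
from setuptools import find_packages, setup

setup(
    name="example_typed_pkg",
    version="0.1.0",
    packages=find_packages(),
    # Ship the empty py.typed marker next to the installed modules.
    package_data={"example_typed_pkg": ["py.typed"]},
    include_package_data=True,
    # Type checkers cannot read inline types out of a zipped egg,
    # so the package must be installed uncompressed.
    zip_safe=False,
)
```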
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import re
5 from pathlib import Path
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 version = Path(package, "__version__.py").read_text()
15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", version).group(1)
16
17
18 def get_long_description():
19 """
20 Return the README.
21 """
22 long_description = ""
23 with open("README.md", encoding="utf8") as f:
24 long_description += f.read()
25 long_description += "\n\n"
26 with open("CHANGELOG.md", encoding="utf8") as f:
27 long_description += f.read()
28 return long_description
29
30
31 def get_packages(package):
32 """
33 Return root package and all sub-packages.
34 """
35 return [str(path.parent) for path in Path(package).glob("**/__init__.py")]
36
37
38 setup(
39 name="httpx",
40 python_requires=">=3.6",
41 version=get_version("httpx"),
42 url="https://github.com/encode/httpx",
43 license="BSD",
44 description="The next generation HTTP client.",
45 long_description=get_long_description(),
46 long_description_content_type="text/markdown",
47 author="Tom Christie",
48 author_email="[email protected]",
49 package_data={"httpx": ["py.typed"]},
50 packages=get_packages("httpx"),
51 include_package_data=True,
52 install_requires=[
53 "certifi",
54 "chardet==3.*",
55 "h11==0.8.*",
56 "h2==3.*",
57 "hstspreload>=2019.8.27",
58 "idna==2.*",
59 "rfc3986==1.*",
60 ],
61 classifiers=[
62 "Development Status :: 3 - Alpha",
63 "Environment :: Web Environment",
64 "Intended Audience :: Developers",
65 "License :: OSI Approved :: BSD License",
66 "Operating System :: OS Independent",
67 "Topic :: Internet :: WWW/HTTP",
68 "Programming Language :: Python :: 3",
69 "Programming Language :: Python :: 3.6",
70 "Programming Language :: Python :: 3.7",
71 "Programming Language :: Python :: 3.8",
72 ],
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -49,6 +49,7 @@
package_data={"httpx": ["py.typed"]},
packages=get_packages("httpx"),
include_package_data=True,
+ zip_safe=False,
install_requires=[
"certifi",
"chardet==3.*",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -49,6 +49,7 @@\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n+ zip_safe=False,\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n", "issue": "Missing py.typed declaration?\n`mypy` is complaining about not being able to find type annotations for `httpx`: \r\n\r\n`error: Cannot find module named 'httpx'`\r\n\r\nI'm somewhat new to using type annotations/static type checking in Python, but from the mypy documentation [here](https://mypy.readthedocs.io/en/latest/installed_packages.html#making-pep-561-compatible-packages) it looks like there may be a missing declaration in `setup.py`?\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"hstspreload>=2019.8.27\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and 
all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"hstspreload>=2019.8.27\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]} | 1,005 | 83 |
gh_patches_debug_2399 | rasdani/github-patches | git_diff | WeblateOrg__weblate-1655 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
File download is outdated
### Steps to reproduce
1. Edit string.
2. Download the original translation file (without conversion).
### Actual behaviour
The file does not have recent changes.
### Expected behaviour
All changes should be reflected.
### Server configuration
Current master
--- END ISSUE ---
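The behaviour described here is a stale-read pattern: recent edits appear to be buffered as pending units and only written back to the underlying translation file later, so a download taken in between misses them. A minimal sketch of the general fix is below; the helper names are hypothetical, and Weblate's actual change is the patch shown at the end of this entry.

```python
# Generic "flush before download" sketch -- hypothetical helpers, not Weblate's real API.
def download_translation_file(translation):
    # Assumption: edits are buffered in the database as pending units and are
    # only periodically written back to the on-disk translation file.
    if translation.has_pending_units():       # hypothetical check
        translation.flush_pending_units()     # hypothetical write-back of buffered edits

    with open(translation.get_filename(), "rb") as handle:
        return handle.read()                  # the file now reflects the latest edits
```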
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/trans/views/helper.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © 2012 - 2017 Michal Čihař <[email protected]>
4 #
5 # This file is part of Weblate <https://weblate.org/>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <https://www.gnu.org/licenses/>.
19 #
20 """Helper methods for views."""
21
22 from django.http import HttpResponse, Http404
23 from django.shortcuts import get_object_or_404
24 import django.utils.translation
25 from django.utils.translation import trans_real, ugettext as _
26
27 from weblate.utils import messages
28 from weblate.permissions.helpers import check_access
29 from weblate.trans.exporters import get_exporter
30 from weblate.trans.models import Project, SubProject, Translation
31
32
33 def get_translation(request, project, subproject, lang, skip_acl=False):
34 """Return translation matching parameters."""
35 translation = get_object_or_404(
36 Translation.objects.prefetch(),
37 language__code=lang,
38 subproject__slug=subproject,
39 subproject__project__slug=project,
40 enabled=True
41 )
42 if not skip_acl:
43 check_access(request, translation.subproject.project)
44 return translation
45
46
47 def get_subproject(request, project, subproject, skip_acl=False):
48 """Return subproject matching parameters."""
49 subproject = get_object_or_404(
50 SubProject.objects.prefetch(),
51 project__slug=project,
52 slug=subproject
53 )
54 if not skip_acl:
55 check_access(request, subproject.project)
56 return subproject
57
58
59 def get_project(request, project, skip_acl=False):
60 """Return project matching parameters."""
61 project = get_object_or_404(
62 Project,
63 slug=project,
64 )
65 if not skip_acl:
66 check_access(request, project)
67 return project
68
69
70 def get_project_translation(request, project=None, subproject=None, lang=None):
71 """Return project, subproject, translation tuple for given parameters."""
72
73 if lang is not None and subproject is not None:
74 # Language defined? We can get all
75 translation = get_translation(request, project, subproject, lang)
76 subproject = translation.subproject
77 project = subproject.project
78 else:
79 translation = None
80 if subproject is not None:
81 # Component defined?
82 subproject = get_subproject(request, project, subproject)
83 project = subproject.project
84 elif project is not None:
85 # Only project defined?
86 project = get_project(request, project)
87
88 # Return tuple
89 return project, subproject, translation
90
91
92 def try_set_language(lang):
93 """Try to activate language"""
94
95 try:
96 django.utils.translation.activate(lang)
97 # workaround for https://code.djangoproject.com/ticket/26050
98 # pylint: disable=W0212
99 if trans_real.catalog()._catalog is None:
100 raise Exception('Invalid language!')
101 except Exception:
102 # Ignore failure on activating language
103 django.utils.translation.activate('en')
104
105
106 def import_message(request, count, message_none, message_ok):
107 if count == 0:
108 messages.warning(request, message_none)
109 else:
110 messages.success(request, message_ok % count)
111
112
113 def download_translation_file(translation, fmt=None):
114 if fmt is not None:
115 try:
116 exporter = get_exporter(fmt)(translation=translation)
117 except KeyError:
118 raise Http404('File format not supported')
119 exporter.add_units(translation)
120 return exporter.get_response(
121 '{{project}}-{0}-{{language}}.{{extension}}'.format(
122 translation.subproject.slug
123 )
124 )
125
126 srcfilename = translation.get_filename()
127
128 # Construct file name (do not use real filename as it is usually not
129 # that useful)
130 filename = '{0}-{1}-{2}.{3}'.format(
131 translation.subproject.project.slug,
132 translation.subproject.slug,
133 translation.language.code,
134 translation.store.extension
135 )
136
137 # Create response
138 with open(srcfilename) as handle:
139 response = HttpResponse(
140 handle.read(),
141 content_type=translation.store.mimetype
142 )
143
144 # Fill in response headers
145 response['Content-Disposition'] = 'attachment; filename={0}'.format(
146 filename
147 )
148
149 return response
150
151
152 def show_form_errors(request, form):
153 """Show all form errors as a message."""
154 for error in form.non_field_errors():
155 messages.error(request, error)
156 for field in form:
157 for error in field.errors:
158 messages.error(
159 request,
160 _('Error in parameter %(field)s: %(error)s') % {
161 'field': field.name,
162 'error': error
163 }
164 )
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/trans/views/helper.py b/weblate/trans/views/helper.py
--- a/weblate/trans/views/helper.py
+++ b/weblate/trans/views/helper.py
@@ -123,6 +123,10 @@
)
)
+ # Force flushing pending units
+ author = translation.get_last_author(True)
+ translation.update_units(author)
+
srcfilename = translation.get_filename()
# Construct file name (do not use real filename as it is usually not
| {"golden_diff": "diff --git a/weblate/trans/views/helper.py b/weblate/trans/views/helper.py\n--- a/weblate/trans/views/helper.py\n+++ b/weblate/trans/views/helper.py\n@@ -123,6 +123,10 @@\n )\n )\n \n+ # Force flushing pending units\n+ author = translation.get_last_author(True)\n+ translation.update_units(author)\n+\n srcfilename = translation.get_filename()\n \n # Construct file name (do not use real filename as it is usually not\n", "issue": "File download is outdated\n### Steps to reproduce\r\n1. Edit string.\r\n2. Donwload original translation file (without conversion).\r\n\r\n### Actual behaviour\r\nThe file does not have recent changes.\r\n\r\n### Expected behaviour\r\nAll changes should be reflected.\r\n\r\n### Server configuration\r\nCurrent master\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2017 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\"\"\"Helper methods for views.\"\"\"\n\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import get_object_or_404\nimport django.utils.translation\nfrom django.utils.translation import trans_real, ugettext as _\n\nfrom weblate.utils import messages\nfrom weblate.permissions.helpers import check_access\nfrom weblate.trans.exporters import get_exporter\nfrom weblate.trans.models import Project, SubProject, Translation\n\n\ndef get_translation(request, project, subproject, lang, skip_acl=False):\n \"\"\"Return translation matching parameters.\"\"\"\n translation = get_object_or_404(\n Translation.objects.prefetch(),\n language__code=lang,\n subproject__slug=subproject,\n subproject__project__slug=project,\n enabled=True\n )\n if not skip_acl:\n check_access(request, translation.subproject.project)\n return translation\n\n\ndef get_subproject(request, project, subproject, skip_acl=False):\n \"\"\"Return subproject matching parameters.\"\"\"\n subproject = get_object_or_404(\n SubProject.objects.prefetch(),\n project__slug=project,\n slug=subproject\n )\n if not skip_acl:\n check_access(request, subproject.project)\n return subproject\n\n\ndef get_project(request, project, skip_acl=False):\n \"\"\"Return project matching parameters.\"\"\"\n project = get_object_or_404(\n Project,\n slug=project,\n )\n if not skip_acl:\n check_access(request, project)\n return project\n\n\ndef get_project_translation(request, project=None, subproject=None, lang=None):\n \"\"\"Return project, subproject, translation tuple for given parameters.\"\"\"\n\n if lang is not None and subproject is not None:\n # Language defined? 
We can get all\n translation = get_translation(request, project, subproject, lang)\n subproject = translation.subproject\n project = subproject.project\n else:\n translation = None\n if subproject is not None:\n # Component defined?\n subproject = get_subproject(request, project, subproject)\n project = subproject.project\n elif project is not None:\n # Only project defined?\n project = get_project(request, project)\n\n # Return tuple\n return project, subproject, translation\n\n\ndef try_set_language(lang):\n \"\"\"Try to activate language\"\"\"\n\n try:\n django.utils.translation.activate(lang)\n # workaround for https://code.djangoproject.com/ticket/26050\n # pylint: disable=W0212\n if trans_real.catalog()._catalog is None:\n raise Exception('Invalid language!')\n except Exception:\n # Ignore failure on activating language\n django.utils.translation.activate('en')\n\n\ndef import_message(request, count, message_none, message_ok):\n if count == 0:\n messages.warning(request, message_none)\n else:\n messages.success(request, message_ok % count)\n\n\ndef download_translation_file(translation, fmt=None):\n if fmt is not None:\n try:\n exporter = get_exporter(fmt)(translation=translation)\n except KeyError:\n raise Http404('File format not supported')\n exporter.add_units(translation)\n return exporter.get_response(\n '{{project}}-{0}-{{language}}.{{extension}}'.format(\n translation.subproject.slug\n )\n )\n\n srcfilename = translation.get_filename()\n\n # Construct file name (do not use real filename as it is usually not\n # that useful)\n filename = '{0}-{1}-{2}.{3}'.format(\n translation.subproject.project.slug,\n translation.subproject.slug,\n translation.language.code,\n translation.store.extension\n )\n\n # Create response\n with open(srcfilename) as handle:\n response = HttpResponse(\n handle.read(),\n content_type=translation.store.mimetype\n )\n\n # Fill in response headers\n response['Content-Disposition'] = 'attachment; filename={0}'.format(\n filename\n )\n\n return response\n\n\ndef show_form_errors(request, form):\n \"\"\"Show all form errors as a message.\"\"\"\n for error in form.non_field_errors():\n messages.error(request, error)\n for field in form:\n for error in field.errors:\n messages.error(\n request,\n _('Error in parameter %(field)s: %(error)s') % {\n 'field': field.name,\n 'error': error\n }\n )\n", "path": "weblate/trans/views/helper.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2017 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\"\"\"Helper methods for views.\"\"\"\n\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import get_object_or_404\nimport django.utils.translation\nfrom django.utils.translation import trans_real, ugettext as _\n\nfrom weblate.utils import messages\nfrom weblate.permissions.helpers import check_access\nfrom weblate.trans.exporters import get_exporter\nfrom weblate.trans.models import Project, SubProject, Translation\n\n\ndef get_translation(request, project, subproject, lang, skip_acl=False):\n \"\"\"Return translation matching parameters.\"\"\"\n translation = get_object_or_404(\n Translation.objects.prefetch(),\n language__code=lang,\n subproject__slug=subproject,\n subproject__project__slug=project,\n enabled=True\n )\n if not skip_acl:\n check_access(request, translation.subproject.project)\n return translation\n\n\ndef get_subproject(request, project, subproject, skip_acl=False):\n \"\"\"Return subproject matching parameters.\"\"\"\n subproject = get_object_or_404(\n SubProject.objects.prefetch(),\n project__slug=project,\n slug=subproject\n )\n if not skip_acl:\n check_access(request, subproject.project)\n return subproject\n\n\ndef get_project(request, project, skip_acl=False):\n \"\"\"Return project matching parameters.\"\"\"\n project = get_object_or_404(\n Project,\n slug=project,\n )\n if not skip_acl:\n check_access(request, project)\n return project\n\n\ndef get_project_translation(request, project=None, subproject=None, lang=None):\n \"\"\"Return project, subproject, translation tuple for given parameters.\"\"\"\n\n if lang is not None and subproject is not None:\n # Language defined? We can get all\n translation = get_translation(request, project, subproject, lang)\n subproject = translation.subproject\n project = subproject.project\n else:\n translation = None\n if subproject is not None:\n # Component defined?\n subproject = get_subproject(request, project, subproject)\n project = subproject.project\n elif project is not None:\n # Only project defined?\n project = get_project(request, project)\n\n # Return tuple\n return project, subproject, translation\n\n\ndef try_set_language(lang):\n \"\"\"Try to activate language\"\"\"\n\n try:\n django.utils.translation.activate(lang)\n # workaround for https://code.djangoproject.com/ticket/26050\n # pylint: disable=W0212\n if trans_real.catalog()._catalog is None:\n raise Exception('Invalid language!')\n except Exception:\n # Ignore failure on activating language\n django.utils.translation.activate('en')\n\n\ndef import_message(request, count, message_none, message_ok):\n if count == 0:\n messages.warning(request, message_none)\n else:\n messages.success(request, message_ok % count)\n\n\ndef download_translation_file(translation, fmt=None):\n if fmt is not None:\n try:\n exporter = get_exporter(fmt)(translation=translation)\n except KeyError:\n raise Http404('File format not supported')\n exporter.add_units(translation)\n return exporter.get_response(\n '{{project}}-{0}-{{language}}.{{extension}}'.format(\n translation.subproject.slug\n )\n )\n\n # Force flushing pending units\n author = translation.get_last_author(True)\n translation.update_units(author)\n\n srcfilename = translation.get_filename()\n\n # Construct file name (do not use real filename as it is usually not\n # that useful)\n filename = '{0}-{1}-{2}.{3}'.format(\n translation.subproject.project.slug,\n translation.subproject.slug,\n translation.language.code,\n translation.store.extension\n )\n\n # 
Create response\n with open(srcfilename) as handle:\n response = HttpResponse(\n handle.read(),\n content_type=translation.store.mimetype\n )\n\n # Fill in response headers\n response['Content-Disposition'] = 'attachment; filename={0}'.format(\n filename\n )\n\n return response\n\n\ndef show_form_errors(request, form):\n \"\"\"Show all form errors as a message.\"\"\"\n for error in form.non_field_errors():\n messages.error(request, error)\n for field in form:\n for error in field.errors:\n messages.error(\n request,\n _('Error in parameter %(field)s: %(error)s') % {\n 'field': field.name,\n 'error': error\n }\n )\n", "path": "weblate/trans/views/helper.py"}]} | 1,843 | 112 |
gh_patches_debug_3989 | rasdani/github-patches | git_diff | matrix-org__synapse-8583 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mypy==0.782 will cause mypy-zope dependency regression in future pip releases
## Description
Pip is planning to use a [new resolver by default from `20.3`](https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-2-2020), which is stricter in its dependency handling.
Currently, `tox`'s `mypy` test environment defines this:
```
deps =
{[base]deps}
mypy==0.782
mypy-zope
```
Given that input, `pip>=20.3` will likely either:
1. Choose an older `mypy-zope` package that works with `mypy==0.782`, causing a regression.
2. ~~Reject installing outright, as this stricter dependency resolver will refuse to install incompatible packages.~~
--- END ISSUE ---
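One way to keep such pins consistent is to group the whole type-checking toolchain behind a single optional extra, so mypy and mypy-zope are always resolved together rather than pinned independently in tox. A sketch of that idea follows (version numbers illustrative; the entry's patch below does essentially this):

```python
# Sketch: declare the type-checking toolchain as one optional extra in setup.py.
# (Mirrors the idea of the patch below; version numbers are illustrative.)
CONDITIONAL_REQUIREMENTS = {}  # in the real setup.py this dict already exists
CONDITIONAL_REQUIREMENTS["mypy"] = [
    "mypy==0.790",   # pinned mypy release
    "mypy-zope",     # resolved together with the pin above, in one place
]
```

The tox environment can then install it through the extra (for example `pip install -e .[mypy]`), so the pin no longer lives in two files.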
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # Copyright 2014-2017 OpenMarket Ltd
4 # Copyright 2017 Vector Creations Ltd
5 # Copyright 2017-2018 New Vector Ltd
6 #
7 # Licensed under the Apache License, Version 2.0 (the "License");
8 # you may not use this file except in compliance with the License.
9 # You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing, software
14 # distributed under the License is distributed on an "AS IS" BASIS,
15 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
18 import glob
19 import os
20
21 from setuptools import Command, find_packages, setup
22
23 here = os.path.abspath(os.path.dirname(__file__))
24
25
26 # Some notes on `setup.py test`:
27 #
28 # Once upon a time we used to try to make `setup.py test` run `tox` to run the
29 # tests. That's a bad idea for three reasons:
30 #
31 # 1: `setup.py test` is supposed to find out whether the tests work in the
32 # *current* environmentt, not whatever tox sets up.
33 # 2: Empirically, trying to install tox during the test run wasn't working ("No
34 # module named virtualenv").
35 # 3: The tox documentation advises against it[1].
36 #
37 # Even further back in time, we used to use setuptools_trial [2]. That has its
38 # own set of issues: for instance, it requires installation of Twisted to build
39 # an sdist (because the recommended mode of usage is to add it to
40 # `setup_requires`). That in turn means that in order to successfully run tox
41 # you have to have the python header files installed for whichever version of
42 # python tox uses (which is python3 on recent ubuntus, for example).
43 #
44 # So, for now at least, we stick with what appears to be the convention among
45 # Twisted projects, and don't attempt to do anything when someone runs
46 # `setup.py test`; instead we direct people to run `trial` directly if they
47 # care.
48 #
49 # [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
50 # [2]: https://pypi.python.org/pypi/setuptools_trial
51 class TestCommand(Command):
52 user_options = []
53
54 def initialize_options(self):
55 pass
56
57 def finalize_options(self):
58 pass
59
60 def run(self):
61 print(
62 """Synapse's tests cannot be run via setup.py. To run them, try:
63 PYTHONPATH="." trial tests
64 """
65 )
66
67
68 def read_file(path_segments):
69 """Read a file from the package. Takes a list of strings to join to
70 make the path"""
71 file_path = os.path.join(here, *path_segments)
72 with open(file_path) as f:
73 return f.read()
74
75
76 def exec_file(path_segments):
77 """Execute a single python file to get the variables defined in it"""
78 result = {}
79 code = read_file(path_segments)
80 exec(code, result)
81 return result
82
83
84 version = exec_file(("synapse", "__init__.py"))["__version__"]
85 dependencies = exec_file(("synapse", "python_dependencies.py"))
86 long_description = read_file(("README.rst",))
87
88 REQUIREMENTS = dependencies["REQUIREMENTS"]
89 CONDITIONAL_REQUIREMENTS = dependencies["CONDITIONAL_REQUIREMENTS"]
90 ALL_OPTIONAL_REQUIREMENTS = dependencies["ALL_OPTIONAL_REQUIREMENTS"]
91
92 # Make `pip install matrix-synapse[all]` install all the optional dependencies.
93 CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
94
95 # Developer dependencies should not get included in "all".
96 #
97 # We pin black so that our tests don't start failing on new releases.
98 CONDITIONAL_REQUIREMENTS["lint"] = [
99 "isort==5.0.3",
100 "black==19.10b0",
101 "flake8-comprehensions",
102 "flake8",
103 ]
104
105 # Dependencies which are exclusively required by unit test code. This is
106 # NOT a list of all modules that are necessary to run the unit tests.
107 # Tests assume that all optional dependencies are installed.
108 #
109 # parameterized_class decorator was introduced in parameterized 0.7.0
110 CONDITIONAL_REQUIREMENTS["test"] = ["mock>=2.0", "parameterized>=0.7.0"]
111
112 setup(
113 name="matrix-synapse",
114 version=version,
115 packages=find_packages(exclude=["tests", "tests.*"]),
116 description="Reference homeserver for the Matrix decentralised comms protocol",
117 install_requires=REQUIREMENTS,
118 extras_require=CONDITIONAL_REQUIREMENTS,
119 include_package_data=True,
120 zip_safe=False,
121 long_description=long_description,
122 python_requires="~=3.5",
123 classifiers=[
124 "Development Status :: 5 - Production/Stable",
125 "Topic :: Communications :: Chat",
126 "License :: OSI Approved :: Apache Software License",
127 "Programming Language :: Python :: 3 :: Only",
128 "Programming Language :: Python :: 3.5",
129 "Programming Language :: Python :: 3.6",
130 "Programming Language :: Python :: 3.7",
131 "Programming Language :: Python :: 3.8",
132 ],
133 scripts=["synctl"] + glob.glob("scripts/*"),
134 cmdclass={"test": TestCommand},
135 )
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -102,6 +102,8 @@
"flake8",
]
+CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope"]
+
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -102,6 +102,8 @@\n \"flake8\",\n ]\n \n+CONDITIONAL_REQUIREMENTS[\"mypy\"] = [\"mypy==0.790\", \"mypy-zope\"]\n+\n # Dependencies which are exclusively required by unit test code. This is\n # NOT a list of all modules that are necessary to run the unit tests.\n # Tests assume that all optional dependencies are installed.\n", "issue": "mypy==0.782 will cause mypy-zope dependency regression in future pip releases\n## Description\r\n\r\nPip is planning to use a [new resolver by default from `20.3`](https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-2-2020), which is more strict in it's dependency handling.\r\n\r\nCurrently, `tox`'s `mypy` test environment defines this:\r\n\r\n```\r\ndeps =\r\n {[base]deps}\r\n mypy==0.782\r\n mypy-zope\r\n```\r\n\r\nGiven that input, `pip>=20.3` will likely either:\r\n1. Choose an older `mypy-zope` package that works with `mypy==0.782`, causing a regression.\r\n2. ~~Reject installing outright, as this stricter dependency resolver will refuse to install incompatible packages.~~\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright 2014-2017 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2017-2018 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\n\nfrom setuptools import Command, find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n# Some notes on `setup.py test`:\n#\n# Once upon a time we used to try to make `setup.py test` run `tox` to run the\n# tests. That's a bad idea for three reasons:\n#\n# 1: `setup.py test` is supposed to find out whether the tests work in the\n# *current* environmentt, not whatever tox sets up.\n# 2: Empirically, trying to install tox during the test run wasn't working (\"No\n# module named virtualenv\").\n# 3: The tox documentation advises against it[1].\n#\n# Even further back in time, we used to use setuptools_trial [2]. That has its\n# own set of issues: for instance, it requires installation of Twisted to build\n# an sdist (because the recommended mode of usage is to add it to\n# `setup_requires`). That in turn means that in order to successfully run tox\n# you have to have the python header files installed for whichever version of\n# python tox uses (which is python3 on recent ubuntus, for example).\n#\n# So, for now at least, we stick with what appears to be the convention among\n# Twisted projects, and don't attempt to do anything when someone runs\n# `setup.py test`; instead we direct people to run `trial` directly if they\n# care.\n#\n# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command\n# [2]: https://pypi.python.org/pypi/setuptools_trial\nclass TestCommand(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n print(\n \"\"\"Synapse's tests cannot be run via setup.py. 
To run them, try:\n PYTHONPATH=\".\" trial tests\n\"\"\"\n )\n\n\ndef read_file(path_segments):\n \"\"\"Read a file from the package. Takes a list of strings to join to\n make the path\"\"\"\n file_path = os.path.join(here, *path_segments)\n with open(file_path) as f:\n return f.read()\n\n\ndef exec_file(path_segments):\n \"\"\"Execute a single python file to get the variables defined in it\"\"\"\n result = {}\n code = read_file(path_segments)\n exec(code, result)\n return result\n\n\nversion = exec_file((\"synapse\", \"__init__.py\"))[\"__version__\"]\ndependencies = exec_file((\"synapse\", \"python_dependencies.py\"))\nlong_description = read_file((\"README.rst\",))\n\nREQUIREMENTS = dependencies[\"REQUIREMENTS\"]\nCONDITIONAL_REQUIREMENTS = dependencies[\"CONDITIONAL_REQUIREMENTS\"]\nALL_OPTIONAL_REQUIREMENTS = dependencies[\"ALL_OPTIONAL_REQUIREMENTS\"]\n\n# Make `pip install matrix-synapse[all]` install all the optional dependencies.\nCONDITIONAL_REQUIREMENTS[\"all\"] = list(ALL_OPTIONAL_REQUIREMENTS)\n\n# Developer dependencies should not get included in \"all\".\n#\n# We pin black so that our tests don't start failing on new releases.\nCONDITIONAL_REQUIREMENTS[\"lint\"] = [\n \"isort==5.0.3\",\n \"black==19.10b0\",\n \"flake8-comprehensions\",\n \"flake8\",\n]\n\n# Dependencies which are exclusively required by unit test code. This is\n# NOT a list of all modules that are necessary to run the unit tests.\n# Tests assume that all optional dependencies are installed.\n#\n# parameterized_class decorator was introduced in parameterized 0.7.0\nCONDITIONAL_REQUIREMENTS[\"test\"] = [\"mock>=2.0\", \"parameterized>=0.7.0\"]\n\nsetup(\n name=\"matrix-synapse\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n description=\"Reference homeserver for the Matrix decentralised comms protocol\",\n install_requires=REQUIREMENTS,\n extras_require=CONDITIONAL_REQUIREMENTS,\n include_package_data=True,\n zip_safe=False,\n long_description=long_description,\n python_requires=\"~=3.5\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Communications :: Chat\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n scripts=[\"synctl\"] + glob.glob(\"scripts/*\"),\n cmdclass={\"test\": TestCommand},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Copyright 2014-2017 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2017-2018 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\n\nfrom setuptools import Command, find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n# Some notes on `setup.py test`:\n#\n# Once upon a time we used to try to make `setup.py test` run `tox` to run the\n# tests. 
That's a bad idea for three reasons:\n#\n# 1: `setup.py test` is supposed to find out whether the tests work in the\n# *current* environmentt, not whatever tox sets up.\n# 2: Empirically, trying to install tox during the test run wasn't working (\"No\n# module named virtualenv\").\n# 3: The tox documentation advises against it[1].\n#\n# Even further back in time, we used to use setuptools_trial [2]. That has its\n# own set of issues: for instance, it requires installation of Twisted to build\n# an sdist (because the recommended mode of usage is to add it to\n# `setup_requires`). That in turn means that in order to successfully run tox\n# you have to have the python header files installed for whichever version of\n# python tox uses (which is python3 on recent ubuntus, for example).\n#\n# So, for now at least, we stick with what appears to be the convention among\n# Twisted projects, and don't attempt to do anything when someone runs\n# `setup.py test`; instead we direct people to run `trial` directly if they\n# care.\n#\n# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command\n# [2]: https://pypi.python.org/pypi/setuptools_trial\nclass TestCommand(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n print(\n \"\"\"Synapse's tests cannot be run via setup.py. To run them, try:\n PYTHONPATH=\".\" trial tests\n\"\"\"\n )\n\n\ndef read_file(path_segments):\n \"\"\"Read a file from the package. Takes a list of strings to join to\n make the path\"\"\"\n file_path = os.path.join(here, *path_segments)\n with open(file_path) as f:\n return f.read()\n\n\ndef exec_file(path_segments):\n \"\"\"Execute a single python file to get the variables defined in it\"\"\"\n result = {}\n code = read_file(path_segments)\n exec(code, result)\n return result\n\n\nversion = exec_file((\"synapse\", \"__init__.py\"))[\"__version__\"]\ndependencies = exec_file((\"synapse\", \"python_dependencies.py\"))\nlong_description = read_file((\"README.rst\",))\n\nREQUIREMENTS = dependencies[\"REQUIREMENTS\"]\nCONDITIONAL_REQUIREMENTS = dependencies[\"CONDITIONAL_REQUIREMENTS\"]\nALL_OPTIONAL_REQUIREMENTS = dependencies[\"ALL_OPTIONAL_REQUIREMENTS\"]\n\n# Make `pip install matrix-synapse[all]` install all the optional dependencies.\nCONDITIONAL_REQUIREMENTS[\"all\"] = list(ALL_OPTIONAL_REQUIREMENTS)\n\n# Developer dependencies should not get included in \"all\".\n#\n# We pin black so that our tests don't start failing on new releases.\nCONDITIONAL_REQUIREMENTS[\"lint\"] = [\n \"isort==5.0.3\",\n \"black==19.10b0\",\n \"flake8-comprehensions\",\n \"flake8\",\n]\n\nCONDITIONAL_REQUIREMENTS[\"mypy\"] = [\"mypy==0.790\", \"mypy-zope\"]\n\n# Dependencies which are exclusively required by unit test code. 
This is\n# NOT a list of all modules that are necessary to run the unit tests.\n# Tests assume that all optional dependencies are installed.\n#\n# parameterized_class decorator was introduced in parameterized 0.7.0\nCONDITIONAL_REQUIREMENTS[\"test\"] = [\"mock>=2.0\", \"parameterized>=0.7.0\"]\n\nsetup(\n name=\"matrix-synapse\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n description=\"Reference homeserver for the Matrix decentralised comms protocol\",\n install_requires=REQUIREMENTS,\n extras_require=CONDITIONAL_REQUIREMENTS,\n include_package_data=True,\n zip_safe=False,\n long_description=long_description,\n python_requires=\"~=3.5\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Communications :: Chat\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n scripts=[\"synctl\"] + glob.glob(\"scripts/*\"),\n cmdclass={\"test\": TestCommand},\n)\n", "path": "setup.py"}]} | 1,950 | 108 |
gh_patches_debug_23 | rasdani/github-patches | git_diff | horovod__horovod-3745 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No module named 'packaging' when install horovod
It seems that the horovod v0.26.0 has some dependency problems.
How long does it take for a new patch version to be released or should I pin the horovod version? ^_^

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `horovod/__init__.py`
Content:
```
1 from horovod.runner import run
2
3 __version__ = '0.26.0'
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/horovod/__init__.py b/horovod/__init__.py
--- a/horovod/__init__.py
+++ b/horovod/__init__.py
@@ -1,3 +1,3 @@
from horovod.runner import run
-__version__ = '0.26.0'
+__version__ = '0.26.1'
| {"golden_diff": "diff --git a/horovod/__init__.py b/horovod/__init__.py\n--- a/horovod/__init__.py\n+++ b/horovod/__init__.py\n@@ -1,3 +1,3 @@\n from horovod.runner import run\n \n-__version__ = '0.26.0'\n+__version__ = '0.26.1'\n", "issue": "No module named 'packaging' when install horovod\nIt seems that the horovod v0.26.0 has some dependency problems.\r\n\r\nHow long does it take for a new patch version to be released or should I pin the horovod version? ^_^\r\n\r\n\r\n\n", "before_files": [{"content": "from horovod.runner import run\n\n__version__ = '0.26.0'\n", "path": "horovod/__init__.py"}], "after_files": [{"content": "from horovod.runner import run\n\n__version__ = '0.26.1'\n", "path": "horovod/__init__.py"}]} | 410 | 88 |
gh_patches_debug_29710 | rasdani/github-patches | git_diff | keras-team__autokeras-166 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Model evaluation on test set yields different results on the same model
<!---
**If you are reporting a bug:**
* Verify that your issue is not being currently addressed by other issues or pull requests.
* Please note that Auto-Keras is only compatible with **Python 3.6**.
* Tag the issue with the `bug report` tag.
-->
### Bug Description
Test set evaluation yields different results if re-run.
### Reproducing Steps
Steps to reproduce the behavior:
re-run multiple times:
```python
y_pred = clf.evaluate(X_test, y_test)
print(y_pred)
```
will yield different results:
```
0.0992
0.1032
0.101
0.0989
```
Further, using manual evaluation:
```python
y_prediction = clf.predict(x_test=X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_pred=y_prediction, y_true=y_test)
```
leads to different results as well. It looks like the model either uses some random function (AFAIK all: ```random_states=42``` in the source code) or there is some major error in the pipeline that causes different predictions of the test set all the time.
### Expected Behavior
Final evaluation on a test set should not lead to different results using the same model on the same data.
### Setup Details
Include the details about the versions of:
- OS type and version: Linux
- Python: 3.6.5
- autokeras: 0.2.11
- scikit-learn:0.19.1
- numpy:1.14.5
- keras: 2.2.2
- scipy:1.1.0
- tensorflow: 1.10.0
- pytorch:0.4.1
### Additional context
This error is verified on MNIST and Fashion-MNIST.
--- END ISSUE ---
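This symptom is what a shuffled evaluation loader produces: if the test-set DataLoader is built with `shuffle=True`, predictions come back in a different order on every call, so comparing them index-by-index against `y_test` (as in the `accuracy_score` snippet above) yields a different number each run. A standalone sketch of the effect in plain PyTorch (not autokeras code):

```python
# Demo: a shuffled loader scrambles the order of test samples on every pass.
import torch
from torch.utils.data import DataLoader, TensorDataset

features = torch.arange(8, dtype=torch.float32).unsqueeze(1)
labels = torch.arange(8)
dataset = TensorDataset(features, labels)

shuffled = DataLoader(dataset, batch_size=4, shuffle=True)   # order differs per run
ordered = DataLoader(dataset, batch_size=4, shuffle=False)   # stable order for evaluation

print([y.tolist() for _, y in shuffled])  # e.g. [[5, 2, 7, 0], [3, 6, 1, 4]]
print([y.tolist() for _, y in ordered])   # always [[0, 1, 2, 3], [4, 5, 6, 7]]
```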
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `autokeras/preprocessor.py`
Content:
```
1 import torch
2
3 import numpy as np
4 from torch.utils.data import Dataset, DataLoader
5 from torchvision.transforms import ToPILImage, RandomCrop, RandomHorizontalFlip, ToTensor, Normalize, Compose
6
7 from autokeras.constant import Constant
8
9
10 class OneHotEncoder:
11 """A class that can format data.
12
13 This class provides ways to transform data's classification label into vector.
14
15 Attributes:
16 data: The input data
17 n_classes: The number of classes in the classification problem.
18 labels: The number of labels.
19 label_to_vec: Mapping from label to vector.
20 int_to_label: Mapping from int to label.
21 """
22
23 def __init__(self):
24 """Initialize a OneHotEncoder"""
25 self.data = None
26 self.n_classes = 0
27 self.labels = None
28 self.label_to_vec = {}
29 self.int_to_label = {}
30
31 def fit(self, data):
32 """Create mapping from label to vector, and vector to label."""
33 data = np.array(data).flatten()
34 self.labels = set(data)
35 self.n_classes = len(self.labels)
36 for index, label in enumerate(self.labels):
37 vec = np.array([0] * self.n_classes)
38 vec[index] = 1
39 self.label_to_vec[label] = vec
40 self.int_to_label[index] = label
41
42 def transform(self, data):
43 """Get vector for every element in the data array."""
44 data = np.array(data)
45 if len(data.shape) > 1:
46 data = data.flatten()
47 return np.array(list(map(lambda x: self.label_to_vec[x], data)))
48
49 def inverse_transform(self, data):
50 """Get label for every element in data."""
51 return np.array(list(map(lambda x: self.int_to_label[x], np.argmax(np.array(data), axis=1))))
52
53
54 class DataTransformer:
55 def __init__(self, data, augment=Constant.DATA_AUGMENTATION):
56 self.max_val = data.max()
57 data = data / self.max_val
58 self.mean = np.mean(data, axis=(0, 1, 2), keepdims=True).flatten()
59 self.std = np.std(data, axis=(0, 1, 2), keepdims=True).flatten()
60 self.augment = augment
61
62 def transform_train(self, data, targets=None, batch_size=None):
63 if not self.augment:
64 augment_list = []
65 else:
66 augment_list = [ToPILImage(),
67 RandomCrop(data.shape[1:3], padding=4),
68 RandomHorizontalFlip(),
69 ToTensor()
70 ]
71 common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]
72 compose_list = augment_list + common_list
73
74 return self._transform(batch_size, compose_list, data, targets)
75
76 def transform_test(self, data, targets=None, batch_size=None):
77 common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]
78 compose_list = common_list
79
80 return self._transform(batch_size, compose_list, data, targets)
81
82 def _transform(self, batch_size, compose_list, data, targets):
83 if batch_size is None:
84 batch_size = Constant.MAX_BATCH_SIZE
85 batch_size = min(len(data), batch_size)
86 data = data / self.max_val
87 data = torch.Tensor(data.transpose(0, 3, 1, 2))
88 data_transforms = Compose(compose_list)
89 dataset = MultiTransformDataset(data, targets, data_transforms)
90 return DataLoader(dataset, batch_size=batch_size, shuffle=True)
91
92
93 class MultiTransformDataset(Dataset):
94 def __init__(self, dataset, target, compose):
95 self.dataset = dataset
96 self.target = target
97 self.compose = compose
98
99 def __getitem__(self, index):
100 feature = self.dataset[index]
101 if self.target is None:
102 return self.compose(feature)
103 return self.compose(feature), self.target[index]
104
105 def __len__(self):
106 return len(self.dataset)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/autokeras/preprocessor.py b/autokeras/preprocessor.py
--- a/autokeras/preprocessor.py
+++ b/autokeras/preprocessor.py
@@ -71,23 +71,31 @@
common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]
compose_list = augment_list + common_list
- return self._transform(batch_size, compose_list, data, targets)
+ dataset = self._transform(compose_list, data, targets)
+
+ if batch_size is None:
+ batch_size = Constant.MAX_BATCH_SIZE
+ batch_size = min(len(data), batch_size)
+
+ return DataLoader(dataset, batch_size=batch_size, shuffle=True)
def transform_test(self, data, targets=None, batch_size=None):
common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]
compose_list = common_list
- return self._transform(batch_size, compose_list, data, targets)
+ dataset = self._transform(compose_list, data, targets)
- def _transform(self, batch_size, compose_list, data, targets):
if batch_size is None:
batch_size = Constant.MAX_BATCH_SIZE
batch_size = min(len(data), batch_size)
+
+ return DataLoader(dataset, batch_size=batch_size, shuffle=False)
+
+ def _transform(self, compose_list, data, targets):
data = data / self.max_val
data = torch.Tensor(data.transpose(0, 3, 1, 2))
data_transforms = Compose(compose_list)
- dataset = MultiTransformDataset(data, targets, data_transforms)
- return DataLoader(dataset, batch_size=batch_size, shuffle=True)
+ return MultiTransformDataset(data, targets, data_transforms)
class MultiTransformDataset(Dataset):
| {"golden_diff": "diff --git a/autokeras/preprocessor.py b/autokeras/preprocessor.py\n--- a/autokeras/preprocessor.py\n+++ b/autokeras/preprocessor.py\n@@ -71,23 +71,31 @@\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = augment_list + common_list\n \n- return self._transform(batch_size, compose_list, data, targets)\n+ dataset = self._transform(compose_list, data, targets)\n+\n+ if batch_size is None:\n+ batch_size = Constant.MAX_BATCH_SIZE\n+ batch_size = min(len(data), batch_size)\n+\n+ return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n \n def transform_test(self, data, targets=None, batch_size=None):\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = common_list\n \n- return self._transform(batch_size, compose_list, data, targets)\n+ dataset = self._transform(compose_list, data, targets)\n \n- def _transform(self, batch_size, compose_list, data, targets):\n if batch_size is None:\n batch_size = Constant.MAX_BATCH_SIZE\n batch_size = min(len(data), batch_size)\n+\n+ return DataLoader(dataset, batch_size=batch_size, shuffle=False)\n+\n+ def _transform(self, compose_list, data, targets):\n data = data / self.max_val\n data = torch.Tensor(data.transpose(0, 3, 1, 2))\n data_transforms = Compose(compose_list)\n- dataset = MultiTransformDataset(data, targets, data_transforms)\n- return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n+ return MultiTransformDataset(data, targets, data_transforms)\n \n \n class MultiTransformDataset(Dataset):\n", "issue": "Model evaluation on test set yields different results on the same model\n<!---\r\n**If you are reporting a bug:**\r\n* Verify that your issue is not being currently addressed by other issues or pull requests.\r\n* Please note that Auto-Keras is only compatible with **Python 3.6**.\r\n* Tag the issue with the `bug report` tag.\r\n-->\r\n\r\n### Bug Description\r\nTest set evaluation yields different results if re-run.\r\n\r\n### Reproducing Steps\r\nSteps to reproduce the behavior:\r\nre-run multiple times:\r\n```python\r\ny_pred = clf.evaluate(X_test, y_test)\r\nprint(y_pred)\r\n```\r\nwill yield different results:\r\n```\r\n0.0992\r\n0.1032\r\n0.101\r\n0.0989\r\n```\r\n\r\nFurther, using manual evaluation:\r\n```python\r\ny_prediction = clf.predict(x_test=X_test)\r\nfrom sklearn.metrics import accuracy_score\r\naccuracy_score(y_pred=y_prediction, y_true=y_test)\r\n```\r\nleads to different results as well. 
It looks like the model either uses some random function (AFAIK all: ```random_states=42``` in the source code) or there is some major error in the pipeline that causes different predictions of the test set all the time.\r\n\r\n### Expected Behavior\r\nFinal evaluation on a test set should not lead to different results using the same model on the same data.\r\n\r\n### Setup Details\r\nInclude the details about the versions of:\r\n - OS type and version: Linux\r\n - Python: 3.6.5\r\n - autokeras: 0.2.11\r\n - scikit-learn:0.19.1\r\n - numpy:1.14.5\r\n - keras: 2.2.2\r\n - scipy:1.1.0\r\n - tensorflow: 1.10.0\r\n - pytorch:0.4.1\r\n\r\n### Additional context\r\nThis error is verified on MNIST and Fashion-MNIST.\n", "before_files": [{"content": "import torch\n\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.transforms import ToPILImage, RandomCrop, RandomHorizontalFlip, ToTensor, Normalize, Compose\n\nfrom autokeras.constant import Constant\n\n\nclass OneHotEncoder:\n \"\"\"A class that can format data.\n\n This class provides ways to transform data's classification label into vector.\n\n Attributes:\n data: The input data\n n_classes: The number of classes in the classification problem.\n labels: The number of labels.\n label_to_vec: Mapping from label to vector.\n int_to_label: Mapping from int to label.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize a OneHotEncoder\"\"\"\n self.data = None\n self.n_classes = 0\n self.labels = None\n self.label_to_vec = {}\n self.int_to_label = {}\n\n def fit(self, data):\n \"\"\"Create mapping from label to vector, and vector to label.\"\"\"\n data = np.array(data).flatten()\n self.labels = set(data)\n self.n_classes = len(self.labels)\n for index, label in enumerate(self.labels):\n vec = np.array([0] * self.n_classes)\n vec[index] = 1\n self.label_to_vec[label] = vec\n self.int_to_label[index] = label\n\n def transform(self, data):\n \"\"\"Get vector for every element in the data array.\"\"\"\n data = np.array(data)\n if len(data.shape) > 1:\n data = data.flatten()\n return np.array(list(map(lambda x: self.label_to_vec[x], data)))\n\n def inverse_transform(self, data):\n \"\"\"Get label for every element in data.\"\"\"\n return np.array(list(map(lambda x: self.int_to_label[x], np.argmax(np.array(data), axis=1))))\n\n\nclass DataTransformer:\n def __init__(self, data, augment=Constant.DATA_AUGMENTATION):\n self.max_val = data.max()\n data = data / self.max_val\n self.mean = np.mean(data, axis=(0, 1, 2), keepdims=True).flatten()\n self.std = np.std(data, axis=(0, 1, 2), keepdims=True).flatten()\n self.augment = augment\n\n def transform_train(self, data, targets=None, batch_size=None):\n if not self.augment:\n augment_list = []\n else:\n augment_list = [ToPILImage(),\n RandomCrop(data.shape[1:3], padding=4),\n RandomHorizontalFlip(),\n ToTensor()\n ]\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = augment_list + common_list\n\n return self._transform(batch_size, compose_list, data, targets)\n\n def transform_test(self, data, targets=None, batch_size=None):\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = common_list\n\n return self._transform(batch_size, compose_list, data, targets)\n\n def _transform(self, batch_size, compose_list, data, targets):\n if batch_size is None:\n batch_size = Constant.MAX_BATCH_SIZE\n batch_size = min(len(data), batch_size)\n data = data / self.max_val\n data = torch.Tensor(data.transpose(0, 3, 
1, 2))\n data_transforms = Compose(compose_list)\n dataset = MultiTransformDataset(data, targets, data_transforms)\n return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n\n\nclass MultiTransformDataset(Dataset):\n def __init__(self, dataset, target, compose):\n self.dataset = dataset\n self.target = target\n self.compose = compose\n\n def __getitem__(self, index):\n feature = self.dataset[index]\n if self.target is None:\n return self.compose(feature)\n return self.compose(feature), self.target[index]\n\n def __len__(self):\n return len(self.dataset)\n", "path": "autokeras/preprocessor.py"}], "after_files": [{"content": "import torch\n\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.transforms import ToPILImage, RandomCrop, RandomHorizontalFlip, ToTensor, Normalize, Compose\n\nfrom autokeras.constant import Constant\n\n\nclass OneHotEncoder:\n \"\"\"A class that can format data.\n\n This class provides ways to transform data's classification label into vector.\n\n Attributes:\n data: The input data\n n_classes: The number of classes in the classification problem.\n labels: The number of labels.\n label_to_vec: Mapping from label to vector.\n int_to_label: Mapping from int to label.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize a OneHotEncoder\"\"\"\n self.data = None\n self.n_classes = 0\n self.labels = None\n self.label_to_vec = {}\n self.int_to_label = {}\n\n def fit(self, data):\n \"\"\"Create mapping from label to vector, and vector to label.\"\"\"\n data = np.array(data).flatten()\n self.labels = set(data)\n self.n_classes = len(self.labels)\n for index, label in enumerate(self.labels):\n vec = np.array([0] * self.n_classes)\n vec[index] = 1\n self.label_to_vec[label] = vec\n self.int_to_label[index] = label\n\n def transform(self, data):\n \"\"\"Get vector for every element in the data array.\"\"\"\n data = np.array(data)\n if len(data.shape) > 1:\n data = data.flatten()\n return np.array(list(map(lambda x: self.label_to_vec[x], data)))\n\n def inverse_transform(self, data):\n \"\"\"Get label for every element in data.\"\"\"\n return np.array(list(map(lambda x: self.int_to_label[x], np.argmax(np.array(data), axis=1))))\n\n\nclass DataTransformer:\n def __init__(self, data, augment=Constant.DATA_AUGMENTATION):\n self.max_val = data.max()\n data = data / self.max_val\n self.mean = np.mean(data, axis=(0, 1, 2), keepdims=True).flatten()\n self.std = np.std(data, axis=(0, 1, 2), keepdims=True).flatten()\n self.augment = augment\n\n def transform_train(self, data, targets=None, batch_size=None):\n if not self.augment:\n augment_list = []\n else:\n augment_list = [ToPILImage(),\n RandomCrop(data.shape[1:3], padding=4),\n RandomHorizontalFlip(),\n ToTensor()\n ]\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = augment_list + common_list\n\n dataset = self._transform(compose_list, data, targets)\n\n if batch_size is None:\n batch_size = Constant.MAX_BATCH_SIZE\n batch_size = min(len(data), batch_size)\n\n return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n\n def transform_test(self, data, targets=None, batch_size=None):\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = common_list\n\n dataset = self._transform(compose_list, data, targets)\n\n if batch_size is None:\n batch_size = Constant.MAX_BATCH_SIZE\n batch_size = min(len(data), batch_size)\n\n return DataLoader(dataset, batch_size=batch_size, shuffle=False)\n\n def 
_transform(self, compose_list, data, targets):\n data = data / self.max_val\n data = torch.Tensor(data.transpose(0, 3, 1, 2))\n data_transforms = Compose(compose_list)\n return MultiTransformDataset(data, targets, data_transforms)\n\n\nclass MultiTransformDataset(Dataset):\n def __init__(self, dataset, target, compose):\n self.dataset = dataset\n self.target = target\n self.compose = compose\n\n def __getitem__(self, index):\n feature = self.dataset[index]\n if self.target is None:\n return self.compose(feature)\n return self.compose(feature), self.target[index]\n\n def __len__(self):\n return len(self.dataset)\n", "path": "autokeras/preprocessor.py"}]} | 1,736 | 392 |
gh_patches_debug_9194 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2129 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show results framework for private projects
## Test plan
GIVEN the 'My results' tab in MyRSR
WHEN searching for a private project
THEN the project should appear in the list
WHEN the private project is opened in 'My results'
THEN the user should have full access to the results framework, just like a regular public project
## Issue description
Currently, we have disabled the results framework in MyRSR for private projects. However, it should be possible to enter results data for private projects as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/typeahead.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4 See more details in the license.txt file located at the root folder of the
5 Akvo RSR module. For additional details on the GNU license please
6 see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9 from akvo.rest.serializers import (TypeaheadCountrySerializer,
10 TypeaheadOrganisationSerializer,
11 TypeaheadProjectSerializer,
12 TypeaheadProjectUpdateSerializer)
13 from akvo.rsr.models import Country, Organisation, Project, ProjectUpdate
14
15 from rest_framework.decorators import api_view
16 from rest_framework.response import Response
17
18
19 def rejig(queryset, serializer):
20 """Rearrange & add queryset count to the response data."""
21 return {
22 'count': queryset.count(),
23 'results': serializer.data
24 }
25
26
27 @api_view(['GET'])
28 def typeahead_country(request):
29 countries = Country.objects.all()
30 return Response(
31 rejig(countries, TypeaheadCountrySerializer(countries, many=True))
32 )
33
34
35 @api_view(['GET'])
36 def typeahead_organisation(request):
37 organisations = Organisation.objects.all()
38 return Response(
39 rejig(organisations, TypeaheadOrganisationSerializer(organisations,
40 many=True))
41 )
42
43
44 @api_view(['GET'])
45 def typeahead_user_organisations(request):
46 user = request.user
47 is_admin = user.is_active and (user.is_superuser or user.is_admin)
48 organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()
49 return Response(
50 rejig(organisations, TypeaheadOrganisationSerializer(organisations,
51 many=True))
52 )
53
54
55 @api_view(['GET'])
56 def typeahead_project(request):
57 projects = Project.objects.all().exclude(title='')
58 return Response(
59 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
60 )
61
62
63 @api_view(['GET'])
64 def typeahead_user_projects(request):
65 user = request.user
66 is_admin = user.is_active and (user.is_superuser or user.is_admin)
67 if is_admin:
68 projects = Project.objects.all()
69 else:
70 projects = user.approved_organisations().all_projects()
71 projects = projects.exclude(title='')
72 return Response(
73 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
74 )
75
76
77 @api_view(['GET'])
78 def typeahead_impact_projects(request):
79 user = request.user
80 projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()
81 projects = projects.published().filter(is_impact_project=True, is_public=True).order_by('title')
82
83 return Response(
84 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
85 )
86
87
88 @api_view(['GET'])
89 def typeahead_projectupdate(request):
90 updates = ProjectUpdate.objects.all()
91 return Response(
92 rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))
93 )
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py
--- a/akvo/rest/views/typeahead.py
+++ b/akvo/rest/views/typeahead.py
@@ -78,7 +78,7 @@
def typeahead_impact_projects(request):
user = request.user
projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()
- projects = projects.published().filter(is_impact_project=True, is_public=True).order_by('title')
+ projects = projects.published().filter(is_impact_project=True).order_by('title')
return Response(
rejig(projects, TypeaheadProjectSerializer(projects, many=True))
| {"golden_diff": "diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py\n--- a/akvo/rest/views/typeahead.py\n+++ b/akvo/rest/views/typeahead.py\n@@ -78,7 +78,7 @@\n def typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n- projects = projects.published().filter(is_impact_project=True, is_public=True).order_by('title')\n+ projects = projects.published().filter(is_impact_project=True).order_by('title')\n \n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n", "issue": "Show results framework for private projects\n## Test plan\n\nGIVEN the 'My results' tab in MyRSR\nWHEN searching for a private project\nTHEN the project should appear in the list\n\nWHEN the private project is opened in 'My results'\nTHEN the user should have full access to the results framework, just like a regular public project\n## Issue description\n\nCurrently, we have disabled the results framework in MyRSR for private projects. However, it should be possible to enter results data for private projects as well.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rest.serializers import (TypeaheadCountrySerializer,\n TypeaheadOrganisationSerializer,\n TypeaheadProjectSerializer,\n TypeaheadProjectUpdateSerializer)\nfrom akvo.rsr.models import Country, Organisation, Project, ProjectUpdate\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n\ndef rejig(queryset, serializer):\n \"\"\"Rearrange & add queryset count to the response data.\"\"\"\n return {\n 'count': queryset.count(),\n 'results': serializer.data\n }\n\n\n@api_view(['GET'])\ndef typeahead_country(request):\n countries = Country.objects.all()\n return Response(\n rejig(countries, TypeaheadCountrySerializer(countries, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_organisation(request):\n organisations = Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_organisations(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_project(request):\n projects = Project.objects.all().exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_projects(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if is_admin:\n projects = Project.objects.all()\n else:\n projects = user.approved_organisations().all_projects()\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n projects 
= projects.published().filter(is_impact_project=True, is_public=True).order_by('title')\n\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_projectupdate(request):\n updates = ProjectUpdate.objects.all()\n return Response(\n rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))\n )\n", "path": "akvo/rest/views/typeahead.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rest.serializers import (TypeaheadCountrySerializer,\n TypeaheadOrganisationSerializer,\n TypeaheadProjectSerializer,\n TypeaheadProjectUpdateSerializer)\nfrom akvo.rsr.models import Country, Organisation, Project, ProjectUpdate\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n\ndef rejig(queryset, serializer):\n \"\"\"Rearrange & add queryset count to the response data.\"\"\"\n return {\n 'count': queryset.count(),\n 'results': serializer.data\n }\n\n\n@api_view(['GET'])\ndef typeahead_country(request):\n countries = Country.objects.all()\n return Response(\n rejig(countries, TypeaheadCountrySerializer(countries, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_organisation(request):\n organisations = Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_organisations(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_project(request):\n projects = Project.objects.all().exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_projects(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if is_admin:\n projects = Project.objects.all()\n else:\n projects = user.approved_organisations().all_projects()\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n projects = projects.published().filter(is_impact_project=True).order_by('title')\n\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_projectupdate(request):\n updates = ProjectUpdate.objects.all()\n return Response(\n rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))\n )\n", "path": "akvo/rest/views/typeahead.py"}]} | 1,178 | 153 |
gh_patches_debug_24995 | rasdani/github-patches | git_diff | e-valuation__EvaP-1853 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Investigate Github Action caching
One can cache certain directories in GitHub Actions. For Python, caching the entire installation is used (including all the `site-packages`), so that wheels don't need to be rebuilt. Additionally, the download could be faster. We should investigate how much there is to gain.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/management/commands/scss.py`
Content:
```
1 import os
2 import subprocess # nosec
3
4 from django.conf import settings
5 from django.core.management.base import BaseCommand
6
7
8 class Command(BaseCommand):
9 def add_arguments(self, parser):
10 parser.add_argument(
11 "--watch",
12 action="store_true",
13 help="Watch stylesheets and recompile when they change.",
14 )
15 parser.add_argument(
16 "--production",
17 action="store_true",
18 help="Compress output stylesheet and do not generate source maps."
19 " Intended to use in production deployment.",
20 )
21
22 def handle(self, *args, **options):
23 static_directory = settings.STATICFILES_DIRS[0]
24 command = [
25 "npx",
26 "sass",
27 os.path.join(static_directory, "scss", "evap.scss"),
28 os.path.join(static_directory, "css", "evap.css"),
29 ]
30
31 if options["watch"]:
32 command += ["--watch", "--poll"]
33
34 if options["production"]:
35 command += ["--style", "compressed", "--no-source-map"]
36
37 try:
38 subprocess.run(command, check=True) # nosec
39 except FileNotFoundError:
40 print("Could not find sass command", file=self.stderr)
41 except KeyboardInterrupt:
42 pass
43
```
Path: `evap/evaluation/management/commands/ts.py`
Content:
```
1 import argparse
2 import os
3 import subprocess # nosec
4 import unittest
5
6 from django.conf import settings
7 from django.core.management import call_command
8 from django.core.management.base import BaseCommand, CommandError
9 from django.test.runner import DiscoverRunner
10
11
12 class RenderPagesRunner(DiscoverRunner):
13 """Test runner which only includes `render_pages.*` methods.
14 The actual logic of the page rendering is implemented in the `@render_pages` decorator."""
15
16 test_loader = unittest.TestLoader()
17
18 def __init__(self, **kwargs):
19 super().__init__(**kwargs)
20 self.test_loader.testMethodPrefix = "render_pages"
21
22
23 class Command(BaseCommand):
24 def add_arguments(self, parser: argparse.ArgumentParser):
25 subparsers = parser.add_subparsers(dest="command", required=True)
26 compile_parser = subparsers.add_parser("compile")
27 compile_parser.add_argument(
28 "--watch",
29 action="store_true",
30 help="Watch scripts and recompile when they change.",
31 )
32 self.add_fresh_argument(compile_parser)
33 test_parser = subparsers.add_parser("test")
34 self.add_fresh_argument(test_parser)
35 subparsers.add_parser("render_pages")
36
37 @staticmethod
38 def add_fresh_argument(parser: argparse.ArgumentParser):
39 parser.add_argument(
40 "--fresh",
41 action="store_true",
42 help="Delete .tsbuildinfo.json before compilation to force a fresh compilation."
43 "This is useful when incremental compilation does not yield the expected output.",
44 )
45
46 def handle(self, *args, **options):
47 if options["command"] == "compile":
48 self.compile(**options)
49 elif options["command"] == "test":
50 self.test(**options)
51 elif options["command"] == "render_pages":
52 self.render_pages(**options)
53
54 def run_command(self, command):
55 try:
56 subprocess.run(command, check=True) # nosec
57 except FileNotFoundError:
58 print(f"Could not find {command[0]} command", file=self.stderr)
59 except KeyboardInterrupt:
60 pass
61 except subprocess.CalledProcessError as e:
62 raise CommandError("Error during command execution", returncode=e.returncode) from e
63
64 def compile(self, watch=False, fresh=False, **_options):
65 static_directory = settings.STATICFILES_DIRS[0]
66 command = [
67 "npx",
68 "tsc",
69 "--project",
70 os.path.join(static_directory, "ts", "tsconfig.compile.json"),
71 ]
72
73 if watch:
74 command += ["--watch"]
75
76 if fresh:
77 try:
78 os.remove(os.path.join(static_directory, "ts", ".tsbuildinfo.json"))
79 except FileNotFoundError:
80 pass
81
82 self.run_command(command)
83
84 def test(self, **options):
85 call_command("scss")
86 self.compile(**options)
87 self.render_pages()
88 self.run_command(["npx", "jest"])
89
90 @staticmethod
91 def render_pages(**_options):
92 # Enable debug mode as otherwise a collectstatic beforehand would be necessary,
93 # as missing static files would result into an error.
94 test_runner = RenderPagesRunner(debug_mode=True)
95 failed_tests = test_runner.run_tests([])
96 if failed_tests > 0:
97 raise CommandError("Failures during render_pages")
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/evaluation/management/commands/scss.py b/evap/evaluation/management/commands/scss.py
--- a/evap/evaluation/management/commands/scss.py
+++ b/evap/evaluation/management/commands/scss.py
@@ -2,7 +2,7 @@
import subprocess # nosec
from django.conf import settings
-from django.core.management.base import BaseCommand
+from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
@@ -36,7 +36,7 @@
try:
subprocess.run(command, check=True) # nosec
- except FileNotFoundError:
- print("Could not find sass command", file=self.stderr)
+ except FileNotFoundError as e:
+ raise CommandError("Could not find sass command") from e
except KeyboardInterrupt:
pass
diff --git a/evap/evaluation/management/commands/ts.py b/evap/evaluation/management/commands/ts.py
--- a/evap/evaluation/management/commands/ts.py
+++ b/evap/evaluation/management/commands/ts.py
@@ -54,8 +54,8 @@
def run_command(self, command):
try:
subprocess.run(command, check=True) # nosec
- except FileNotFoundError:
- print(f"Could not find {command[0]} command", file=self.stderr)
+ except FileNotFoundError as e:
+ raise CommandError(f"Could not find {command[0]} command") from e
except KeyboardInterrupt:
pass
except subprocess.CalledProcessError as e:
| {"golden_diff": "diff --git a/evap/evaluation/management/commands/scss.py b/evap/evaluation/management/commands/scss.py\n--- a/evap/evaluation/management/commands/scss.py\n+++ b/evap/evaluation/management/commands/scss.py\n@@ -2,7 +2,7 @@\n import subprocess # nosec\n \n from django.conf import settings\n-from django.core.management.base import BaseCommand\n+from django.core.management.base import BaseCommand, CommandError\n \n \n class Command(BaseCommand):\n@@ -36,7 +36,7 @@\n \n try:\n subprocess.run(command, check=True) # nosec\n- except FileNotFoundError:\n- print(\"Could not find sass command\", file=self.stderr)\n+ except FileNotFoundError as e:\n+ raise CommandError(\"Could not find sass command\") from e\n except KeyboardInterrupt:\n pass\ndiff --git a/evap/evaluation/management/commands/ts.py b/evap/evaluation/management/commands/ts.py\n--- a/evap/evaluation/management/commands/ts.py\n+++ b/evap/evaluation/management/commands/ts.py\n@@ -54,8 +54,8 @@\n def run_command(self, command):\n try:\n subprocess.run(command, check=True) # nosec\n- except FileNotFoundError:\n- print(f\"Could not find {command[0]} command\", file=self.stderr)\n+ except FileNotFoundError as e:\n+ raise CommandError(f\"Could not find {command[0]} command\") from e\n except KeyboardInterrupt:\n pass\n except subprocess.CalledProcessError as e:\n", "issue": "Investigate Github Action caching\nOne can cache certain directories in github actions. For Python, caching the entire installation is used (including all the `site-packages`), so that wheels don't need to be rebuild. Additionally, the download could be faster. We should investigate how much there is to gain.\n", "before_files": [{"content": "import os\nimport subprocess # nosec\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--watch\",\n action=\"store_true\",\n help=\"Watch stylesheets and recompile when they change.\",\n )\n parser.add_argument(\n \"--production\",\n action=\"store_true\",\n help=\"Compress output stylesheet and do not generate source maps.\"\n \" Intended to use in production deployment.\",\n )\n\n def handle(self, *args, **options):\n static_directory = settings.STATICFILES_DIRS[0]\n command = [\n \"npx\",\n \"sass\",\n os.path.join(static_directory, \"scss\", \"evap.scss\"),\n os.path.join(static_directory, \"css\", \"evap.css\"),\n ]\n\n if options[\"watch\"]:\n command += [\"--watch\", \"--poll\"]\n\n if options[\"production\"]:\n command += [\"--style\", \"compressed\", \"--no-source-map\"]\n\n try:\n subprocess.run(command, check=True) # nosec\n except FileNotFoundError:\n print(\"Could not find sass command\", file=self.stderr)\n except KeyboardInterrupt:\n pass\n", "path": "evap/evaluation/management/commands/scss.py"}, {"content": "import argparse\nimport os\nimport subprocess # nosec\nimport unittest\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.test.runner import DiscoverRunner\n\n\nclass RenderPagesRunner(DiscoverRunner):\n \"\"\"Test runner which only includes `render_pages.*` methods.\n The actual logic of the page rendering is implemented in the `@render_pages` decorator.\"\"\"\n\n test_loader = unittest.TestLoader()\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.test_loader.testMethodPrefix = \"render_pages\"\n\n\nclass Command(BaseCommand):\n 
def add_arguments(self, parser: argparse.ArgumentParser):\n subparsers = parser.add_subparsers(dest=\"command\", required=True)\n compile_parser = subparsers.add_parser(\"compile\")\n compile_parser.add_argument(\n \"--watch\",\n action=\"store_true\",\n help=\"Watch scripts and recompile when they change.\",\n )\n self.add_fresh_argument(compile_parser)\n test_parser = subparsers.add_parser(\"test\")\n self.add_fresh_argument(test_parser)\n subparsers.add_parser(\"render_pages\")\n\n @staticmethod\n def add_fresh_argument(parser: argparse.ArgumentParser):\n parser.add_argument(\n \"--fresh\",\n action=\"store_true\",\n help=\"Delete .tsbuildinfo.json before compilation to force a fresh compilation.\"\n \"This is useful when incremental compilation does not yield the expected output.\",\n )\n\n def handle(self, *args, **options):\n if options[\"command\"] == \"compile\":\n self.compile(**options)\n elif options[\"command\"] == \"test\":\n self.test(**options)\n elif options[\"command\"] == \"render_pages\":\n self.render_pages(**options)\n\n def run_command(self, command):\n try:\n subprocess.run(command, check=True) # nosec\n except FileNotFoundError:\n print(f\"Could not find {command[0]} command\", file=self.stderr)\n except KeyboardInterrupt:\n pass\n except subprocess.CalledProcessError as e:\n raise CommandError(\"Error during command execution\", returncode=e.returncode) from e\n\n def compile(self, watch=False, fresh=False, **_options):\n static_directory = settings.STATICFILES_DIRS[0]\n command = [\n \"npx\",\n \"tsc\",\n \"--project\",\n os.path.join(static_directory, \"ts\", \"tsconfig.compile.json\"),\n ]\n\n if watch:\n command += [\"--watch\"]\n\n if fresh:\n try:\n os.remove(os.path.join(static_directory, \"ts\", \".tsbuildinfo.json\"))\n except FileNotFoundError:\n pass\n\n self.run_command(command)\n\n def test(self, **options):\n call_command(\"scss\")\n self.compile(**options)\n self.render_pages()\n self.run_command([\"npx\", \"jest\"])\n\n @staticmethod\n def render_pages(**_options):\n # Enable debug mode as otherwise a collectstatic beforehand would be necessary,\n # as missing static files would result into an error.\n test_runner = RenderPagesRunner(debug_mode=True)\n failed_tests = test_runner.run_tests([])\n if failed_tests > 0:\n raise CommandError(\"Failures during render_pages\")\n", "path": "evap/evaluation/management/commands/ts.py"}], "after_files": [{"content": "import os\nimport subprocess # nosec\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--watch\",\n action=\"store_true\",\n help=\"Watch stylesheets and recompile when they change.\",\n )\n parser.add_argument(\n \"--production\",\n action=\"store_true\",\n help=\"Compress output stylesheet and do not generate source maps.\"\n \" Intended to use in production deployment.\",\n )\n\n def handle(self, *args, **options):\n static_directory = settings.STATICFILES_DIRS[0]\n command = [\n \"npx\",\n \"sass\",\n os.path.join(static_directory, \"scss\", \"evap.scss\"),\n os.path.join(static_directory, \"css\", \"evap.css\"),\n ]\n\n if options[\"watch\"]:\n command += [\"--watch\", \"--poll\"]\n\n if options[\"production\"]:\n command += [\"--style\", \"compressed\", \"--no-source-map\"]\n\n try:\n subprocess.run(command, check=True) # nosec\n except FileNotFoundError as e:\n raise CommandError(\"Could not find sass command\") from e\n except 
KeyboardInterrupt:\n pass\n", "path": "evap/evaluation/management/commands/scss.py"}, {"content": "import argparse\nimport os\nimport subprocess # nosec\nimport unittest\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.test.runner import DiscoverRunner\n\n\nclass RenderPagesRunner(DiscoverRunner):\n \"\"\"Test runner which only includes `render_pages.*` methods.\n The actual logic of the page rendering is implemented in the `@render_pages` decorator.\"\"\"\n\n test_loader = unittest.TestLoader()\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.test_loader.testMethodPrefix = \"render_pages\"\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser: argparse.ArgumentParser):\n subparsers = parser.add_subparsers(dest=\"command\", required=True)\n compile_parser = subparsers.add_parser(\"compile\")\n compile_parser.add_argument(\n \"--watch\",\n action=\"store_true\",\n help=\"Watch scripts and recompile when they change.\",\n )\n self.add_fresh_argument(compile_parser)\n test_parser = subparsers.add_parser(\"test\")\n self.add_fresh_argument(test_parser)\n subparsers.add_parser(\"render_pages\")\n\n @staticmethod\n def add_fresh_argument(parser: argparse.ArgumentParser):\n parser.add_argument(\n \"--fresh\",\n action=\"store_true\",\n help=\"Delete .tsbuildinfo.json before compilation to force a fresh compilation.\"\n \"This is useful when incremental compilation does not yield the expected output.\",\n )\n\n def handle(self, *args, **options):\n if options[\"command\"] == \"compile\":\n self.compile(**options)\n elif options[\"command\"] == \"test\":\n self.test(**options)\n elif options[\"command\"] == \"render_pages\":\n self.render_pages(**options)\n\n def run_command(self, command):\n try:\n subprocess.run(command, check=True) # nosec\n except FileNotFoundError as e:\n raise CommandError(f\"Could not find {command[0]} command\") from e\n except KeyboardInterrupt:\n pass\n except subprocess.CalledProcessError as e:\n raise CommandError(\"Error during command execution\", returncode=e.returncode) from e\n\n def compile(self, watch=False, fresh=False, **_options):\n static_directory = settings.STATICFILES_DIRS[0]\n command = [\n \"npx\",\n \"tsc\",\n \"--project\",\n os.path.join(static_directory, \"ts\", \"tsconfig.compile.json\"),\n ]\n\n if watch:\n command += [\"--watch\"]\n\n if fresh:\n try:\n os.remove(os.path.join(static_directory, \"ts\", \".tsbuildinfo.json\"))\n except FileNotFoundError:\n pass\n\n self.run_command(command)\n\n def test(self, **options):\n call_command(\"scss\")\n self.compile(**options)\n self.render_pages()\n self.run_command([\"npx\", \"jest\"])\n\n @staticmethod\n def render_pages(**_options):\n # Enable debug mode as otherwise a collectstatic beforehand would be necessary,\n # as missing static files would result into an error.\n test_runner = RenderPagesRunner(debug_mode=True)\n failed_tests = test_runner.run_tests([])\n if failed_tests > 0:\n raise CommandError(\"Failures during render_pages\")\n", "path": "evap/evaluation/management/commands/ts.py"}]} | 1,561 | 348 |
gh_patches_debug_3823 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-5701 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Latest release broken on Ubuntu 22.04 - ImportError: libffi.so.7: cannot open shared object file: No such file or directory
#### Problem Description
https://askubuntu.com/questions/1286772/libffi-so-7-cannot-open-shared-object-file-no-such-file-or-directory
I don't think our users should do anything with their system to make the binary run, correct?
```
$ ./mitmdump
Traceback (most recent call last):
File "mitmdump", line 3, in <module>
File "mitmproxy/tools/main.py", line 131, in mitmdump
File "PyInstaller/loader/pyimod02_importers.py", line 499, in exec_module
File "mitmproxy/tools/dump.py", line 1, in <module>
File "PyInstaller/loader/pyimod02_importers.py", line 499, in exec_module
File "mitmproxy/addons/__init__.py", line 10, in <module>
File "PyInstaller/loader/pyimod02_importers.py", line 499, in exec_module
File "mitmproxy/addons/cut.py", line 14, in <module>
File "PyInstaller/loader/pyimod02_importers.py", line 499, in exec_module
File "pyperclip/__init__.py", line 52, in <module>
File "PyInstaller/loader/pyimod02_importers.py", line 499, in exec_module
File "ctypes/__init__.py", line 8, in <module>
ImportError: libffi.so.7: cannot open shared object file: No such file or directory
[50935] Failed to execute script 'mitmdump' due to unhandled exception!
```
#### Steps to reproduce the behavior:
1. `mitmdump`
Repros in a clean `docker run --rm -it ubuntu:22.04` as well
#### System Information
9.0.0 just downloaded (I cannot run `mitmdump --version` as it doesn't launch)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import re
3 from codecs import open
4
5 from setuptools import find_packages, setup
6
7 # Based on https://github.com/pypa/sampleproject/blob/main/setup.py
8 # and https://python-packaging-user-guide.readthedocs.org/
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12 with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14 long_description_content_type = "text/markdown"
15
16 with open(os.path.join(here, "mitmproxy/version.py")) as f:
17 match = re.search(r'VERSION = "(.+?)"', f.read())
18 assert match
19 VERSION = match.group(1)
20
21 setup(
22 name="mitmproxy",
23 version=VERSION,
24 description="An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.",
25 long_description=long_description,
26 long_description_content_type=long_description_content_type,
27 url="http://mitmproxy.org",
28 author="Aldo Cortesi",
29 author_email="[email protected]",
30 license="MIT",
31 classifiers=[
32 "License :: OSI Approved :: MIT License",
33 "Development Status :: 5 - Production/Stable",
34 "Environment :: Console :: Curses",
35 "Operating System :: MacOS",
36 "Operating System :: POSIX",
37 "Operating System :: Microsoft :: Windows",
38 "Programming Language :: Python :: 3 :: Only",
39 "Programming Language :: Python :: 3.9",
40 "Programming Language :: Python :: 3.10",
41 "Programming Language :: Python :: 3.11",
42 "Programming Language :: Python :: Implementation :: CPython",
43 "Topic :: Security",
44 "Topic :: Internet :: WWW/HTTP",
45 "Topic :: Internet :: Proxy Servers",
46 "Topic :: System :: Networking :: Monitoring",
47 "Topic :: Software Development :: Testing",
48 "Typing :: Typed",
49 ],
50 project_urls={
51 "Documentation": "https://docs.mitmproxy.org/stable/",
52 "Source": "https://github.com/mitmproxy/mitmproxy/",
53 "Tracker": "https://github.com/mitmproxy/mitmproxy/issues",
54 },
55 packages=find_packages(
56 include=[
57 "mitmproxy",
58 "mitmproxy.*",
59 ]
60 ),
61 include_package_data=True,
62 entry_points={
63 "console_scripts": [
64 "mitmproxy = mitmproxy.tools.main:mitmproxy",
65 "mitmdump = mitmproxy.tools.main:mitmdump",
66 "mitmweb = mitmproxy.tools.main:mitmweb",
67 ],
68 "pyinstaller40": [
69 "hook-dirs = mitmproxy.utils.pyinstaller:hook_dirs",
70 ]
71 },
72 python_requires=">=3.9",
73 # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#install-requires
74 # It is not considered best practice to use install_requires to pin dependencies to specific versions.
75 install_requires=[
76 "asgiref>=3.2.10,<3.6",
77 "Brotli>=1.0,<1.1",
78 "certifi>=2019.9.11", # no semver here - this should always be on the last release!
79 "cryptography>=38.0,<38.1",
80 "flask>=1.1.1,<2.3",
81 "h11>=0.11,<0.15",
82 "h2>=4.1,<5",
83 "hyperframe>=6.0,<7",
84 "kaitaistruct>=0.10,<0.11",
85 "ldap3>=2.8,<2.10",
86 "mitmproxy_wireguard>=0.1.6,<0.2",
87 "msgpack>=1.0.0, <1.1.0",
88 "passlib>=1.6.5, <1.8",
89 "protobuf>=3.14,<5",
90 "pyOpenSSL>=22.1,<22.2",
91 "pyparsing>=2.4.2,<3.1",
92 "pyperclip>=1.6.0,<1.9",
93 "ruamel.yaml>=0.16,<0.18",
94 "sortedcontainers>=2.3,<2.5",
95 "tornado>=6.1,<7",
96 "urwid>=2.1.1,<2.2",
97 "wsproto>=1.0,<1.3",
98 "publicsuffix2>=2.20190812,<3",
99 "zstandard>=0.11,<0.20",
100 "typing-extensions>=4.3,<4.5; python_version<'3.10'",
101 ],
102 extras_require={
103 ':sys_platform == "win32"': [
104 "pydivert>=2.0.3,<2.2",
105 ],
106 "dev": [
107 "click>=7.0,<8.2",
108 "hypothesis>=5.8,<7",
109 "parver>=0.1,<2.0",
110 "pdoc>=4.0.0",
111 "pyinstaller==5.6.1",
112 "pytest-asyncio>=0.17,<0.21",
113 "pytest-cov>=2.7.1,<4.1",
114 "pytest-timeout>=1.3.3,<2.2",
115 "pytest-xdist>=2.1.0,<3.1",
116 "pytest>=6.1.0,<8",
117 "requests>=2.9.1,<3",
118 "tox>=3.5,<4",
119 "wheel>=0.36.2,<0.39",
120 ],
121 },
122 )
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -108,7 +108,7 @@
"hypothesis>=5.8,<7",
"parver>=0.1,<2.0",
"pdoc>=4.0.0",
- "pyinstaller==5.6.1",
+ "pyinstaller==5.5",
"pytest-asyncio>=0.17,<0.21",
"pytest-cov>=2.7.1,<4.1",
"pytest-timeout>=1.3.3,<2.2",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -108,7 +108,7 @@\n \"hypothesis>=5.8,<7\",\n \"parver>=0.1,<2.0\",\n \"pdoc>=4.0.0\",\n- \"pyinstaller==5.6.1\",\n+ \"pyinstaller==5.5\",\n \"pytest-asyncio>=0.17,<0.21\",\n \"pytest-cov>=2.7.1,<4.1\",\n \"pytest-timeout>=1.3.3,<2.2\",\n", "issue": "Latest release broken on Ubuntu 22.04 - ImportError: libffi.so.7: cannot open shared object file: No such file or directory\n#### Problem Description\r\n\r\nhttps://askubuntu.com/questions/1286772/libffi-so-7-cannot-open-shared-object-file-no-such-file-or-directory\r\n\r\nI don't think our users should do anything with their system to make the binary run, correct?\r\n\r\n```\r\n$ ./mitmdump \r\nTraceback (most recent call last):\r\n File \"mitmdump\", line 3, in <module>\r\n File \"mitmproxy/tools/main.py\", line 131, in mitmdump\r\n File \"PyInstaller/loader/pyimod02_importers.py\", line 499, in exec_module\r\n File \"mitmproxy/tools/dump.py\", line 1, in <module>\r\n File \"PyInstaller/loader/pyimod02_importers.py\", line 499, in exec_module\r\n File \"mitmproxy/addons/__init__.py\", line 10, in <module>\r\n File \"PyInstaller/loader/pyimod02_importers.py\", line 499, in exec_module\r\n File \"mitmproxy/addons/cut.py\", line 14, in <module>\r\n File \"PyInstaller/loader/pyimod02_importers.py\", line 499, in exec_module\r\n File \"pyperclip/__init__.py\", line 52, in <module>\r\n File \"PyInstaller/loader/pyimod02_importers.py\", line 499, in exec_module\r\n File \"ctypes/__init__.py\", line 8, in <module>\r\nImportError: libffi.so.7: cannot open shared object file: No such file or directory\r\n[50935] Failed to execute script 'mitmdump' due to unhandled exception!\r\n```\r\n\r\n#### Steps to reproduce the behavior:\r\n\r\n1. 
`mitmdump`\r\n\r\nRepros in a clean `docker run --rm -it ubuntu:22.04` as well\r\n\r\n#### System Information\r\n\r\n9.0.0 just downloaded (I cannot run `mitmdump --version` as it doesn't launch)\r\n\n", "before_files": [{"content": "import os\nimport re\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\n# Based on https://github.com/pypa/sampleproject/blob/main/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\nlong_description_content_type = \"text/markdown\"\n\nwith open(os.path.join(here, \"mitmproxy/version.py\")) as f:\n match = re.search(r'VERSION = \"(.+?)\"', f.read())\n assert match\n VERSION = match.group(1)\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.\",\n long_description=long_description,\n long_description_content_type=long_description_content_type,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: System :: Networking :: Monitoring\",\n \"Topic :: Software Development :: Testing\",\n \"Typing :: Typed\",\n ],\n project_urls={\n \"Documentation\": \"https://docs.mitmproxy.org/stable/\",\n \"Source\": \"https://github.com/mitmproxy/mitmproxy/\",\n \"Tracker\": \"https://github.com/mitmproxy/mitmproxy/issues\",\n },\n packages=find_packages(\n include=[\n \"mitmproxy\",\n \"mitmproxy.*\",\n ]\n ),\n include_package_data=True,\n entry_points={\n \"console_scripts\": [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n ],\n \"pyinstaller40\": [\n \"hook-dirs = mitmproxy.utils.pyinstaller:hook_dirs\",\n ]\n },\n python_requires=\">=3.9\",\n # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"asgiref>=3.2.10,<3.6\",\n \"Brotli>=1.0,<1.1\",\n \"certifi>=2019.9.11\", # no semver here - this should always be on the last release!\n \"cryptography>=38.0,<38.1\",\n \"flask>=1.1.1,<2.3\",\n \"h11>=0.11,<0.15\",\n \"h2>=4.1,<5\",\n \"hyperframe>=6.0,<7\",\n \"kaitaistruct>=0.10,<0.11\",\n \"ldap3>=2.8,<2.10\",\n \"mitmproxy_wireguard>=0.1.6,<0.2\",\n \"msgpack>=1.0.0, <1.1.0\",\n \"passlib>=1.6.5, <1.8\",\n \"protobuf>=3.14,<5\",\n \"pyOpenSSL>=22.1,<22.2\",\n \"pyparsing>=2.4.2,<3.1\",\n \"pyperclip>=1.6.0,<1.9\",\n \"ruamel.yaml>=0.16,<0.18\",\n \"sortedcontainers>=2.3,<2.5\",\n \"tornado>=6.1,<7\",\n \"urwid>=2.1.1,<2.2\",\n \"wsproto>=1.0,<1.3\",\n \"publicsuffix2>=2.20190812,<3\",\n \"zstandard>=0.11,<0.20\",\n 
\"typing-extensions>=4.3,<4.5; python_version<'3.10'\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n \"dev\": [\n \"click>=7.0,<8.2\",\n \"hypothesis>=5.8,<7\",\n \"parver>=0.1,<2.0\",\n \"pdoc>=4.0.0\",\n \"pyinstaller==5.6.1\",\n \"pytest-asyncio>=0.17,<0.21\",\n \"pytest-cov>=2.7.1,<4.1\",\n \"pytest-timeout>=1.3.3,<2.2\",\n \"pytest-xdist>=2.1.0,<3.1\",\n \"pytest>=6.1.0,<8\",\n \"requests>=2.9.1,<3\",\n \"tox>=3.5,<4\",\n \"wheel>=0.36.2,<0.39\",\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport re\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\n# Based on https://github.com/pypa/sampleproject/blob/main/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\nlong_description_content_type = \"text/markdown\"\n\nwith open(os.path.join(here, \"mitmproxy/version.py\")) as f:\n match = re.search(r'VERSION = \"(.+?)\"', f.read())\n assert match\n VERSION = match.group(1)\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.\",\n long_description=long_description,\n long_description_content_type=long_description_content_type,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: System :: Networking :: Monitoring\",\n \"Topic :: Software Development :: Testing\",\n \"Typing :: Typed\",\n ],\n project_urls={\n \"Documentation\": \"https://docs.mitmproxy.org/stable/\",\n \"Source\": \"https://github.com/mitmproxy/mitmproxy/\",\n \"Tracker\": \"https://github.com/mitmproxy/mitmproxy/issues\",\n },\n packages=find_packages(\n include=[\n \"mitmproxy\",\n \"mitmproxy.*\",\n ]\n ),\n include_package_data=True,\n entry_points={\n \"console_scripts\": [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n ],\n \"pyinstaller40\": [\n \"hook-dirs = mitmproxy.utils.pyinstaller:hook_dirs\",\n ]\n },\n python_requires=\">=3.9\",\n # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"asgiref>=3.2.10,<3.6\",\n \"Brotli>=1.0,<1.1\",\n \"certifi>=2019.9.11\", # no semver here - this should always be on the last release!\n \"cryptography>=38.0,<38.1\",\n \"flask>=1.1.1,<2.3\",\n \"h11>=0.11,<0.15\",\n \"h2>=4.1,<5\",\n \"hyperframe>=6.0,<7\",\n \"kaitaistruct>=0.10,<0.11\",\n \"ldap3>=2.8,<2.10\",\n \"mitmproxy_wireguard>=0.1.6,<0.2\",\n \"msgpack>=1.0.0, <1.1.0\",\n 
\"passlib>=1.6.5, <1.8\",\n \"protobuf>=3.14,<5\",\n \"pyOpenSSL>=22.1,<22.2\",\n \"pyparsing>=2.4.2,<3.1\",\n \"pyperclip>=1.6.0,<1.9\",\n \"ruamel.yaml>=0.16,<0.18\",\n \"sortedcontainers>=2.3,<2.5\",\n \"tornado>=6.1,<7\",\n \"urwid>=2.1.1,<2.2\",\n \"wsproto>=1.0,<1.3\",\n \"publicsuffix2>=2.20190812,<3\",\n \"zstandard>=0.11,<0.20\",\n \"typing-extensions>=4.3,<4.5; python_version<'3.10'\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n \"dev\": [\n \"click>=7.0,<8.2\",\n \"hypothesis>=5.8,<7\",\n \"parver>=0.1,<2.0\",\n \"pdoc>=4.0.0\",\n \"pyinstaller==5.5\",\n \"pytest-asyncio>=0.17,<0.21\",\n \"pytest-cov>=2.7.1,<4.1\",\n \"pytest-timeout>=1.3.3,<2.2\",\n \"pytest-xdist>=2.1.0,<3.1\",\n \"pytest>=6.1.0,<8\",\n \"requests>=2.9.1,<3\",\n \"tox>=3.5,<4\",\n \"wheel>=0.36.2,<0.39\",\n ],\n },\n)\n", "path": "setup.py"}]} | 2,263 | 141 |
gh_patches_debug_34206 | rasdani/github-patches | git_diff | kymatio__kymatio-244 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RELEASE requirements?
Hi,
When doing:
```
conda install pytorch torchvision -c pytorch
pip install -i https://test.pypi.org/simple/ kymatio
```
then cloning the corresponding release, and doing `pytest`, I have tons of errors that are evidence that none of the requirements.txt packages are installed. Actually, this makes me think we should have a pip package for the GPU version (like `pip install kymatio-gpu`)?
https://github.com/kymatio/kymatio/blob/master/requirements.txt
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import csv
3 import importlib
4 import os
5 import shutil
6 import sys
7 from setuptools import setup, find_packages
8
9 # Constants
10 DISTNAME = 'kymatio'
11 DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'
12 URL = 'https://kymatio.github.io'
13 LICENSE = 'BSD-3-Clause'
14
15
16 # Parse description
17 with open('README.md') as f:
18 README = f.read().split('\n')
19 LONG_DESCRIPTION = '\n'.join([x for x in README if not x[:3]=='[!['])
20
21
22 # Parse version.py
23 kymatio_version_spec = importlib.util.spec_from_file_location(
24 'kymatio_version', 'kymatio/version.py')
25 kymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)
26 kymatio_version_spec.loader.exec_module(kymatio_version_module)
27 VERSION = kymatio_version_module.version
28
29
30 # Parse requirements.txt
31 with open('requirements.txt', 'r') as f:
32 REQUIREMENTS = f.read().split('\n')
33
34
35 setup_info = dict(
36 # Metadata
37 name=DISTNAME,
38 version=VERSION,
39 author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '
40 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '
41 'Louis Thiry, Vincent Lostanlen, Joakim Andén, '
42 'Tomás Angles, Gabriel Huang, Roberto Leonarduzzi'),
43 author_email=('[email protected], [email protected], '
44 '[email protected], [email protected], '
45 '[email protected], [email protected], '
46 '[email protected], [email protected], [email protected], '
47 '[email protected], [email protected], [email protected]'),
48 url=URL,
49 download_url='https://github.com/kymatio/kymatio/releases',
50 classifiers=['Intended Audience :: Education',
51 'Intended Audience :: Science/Research',
52 'License :: OSI Approved :: BSD License',
53 'Natural Language :: English',
54 'Operating System :: MacOS',
55 'Operating System :: Microsoft :: Windows',
56 'Operating System :: POSIX :: Linux',
57 'Programming Language :: Python :: 3.4',
58 'Programming Language :: Python :: 3.5',
59 'Programming Language :: Python :: 3.6',
60 'Programming Language :: Python :: 3.7',
61 'Programming Language :: Python :: 3.8',
62 'Topic :: Multimedia :: Graphics :: 3D Modeling',
63 'Topic :: Multimedia :: Sound/Audio :: Analysis',
64 'Topic :: Scientific/Engineering :: Artificial Intelligence',
65 'Topic :: Scientific/Engineering :: Chemistry',
66 'Topic :: Scientific/Engineering :: Image Recognition',
67 'Topic :: Scientific/Engineering :: Information Analysis',
68 'Topic :: Scientific/Engineering :: Mathematics',
69 'Topic :: Scientific/Engineering :: Physics',
70 'Topic :: Software Development :: Libraries :: Python Modules',
71 ],
72 description=DESCRIPTION,
73 long_description=LONG_DESCRIPTION,
74 long_description_content_type='text/markdown',
75 license=LICENSE,
76 packages=find_packages(exclude=('test',)),
77 install_requires=REQUIREMENTS,
78 zip_safe=True,
79 )
80
81 setup(**setup_info)
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
# Constants
DISTNAME = 'kymatio'
DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'
-URL = 'https://kymatio.github.io'
+URL = 'https://www.kymat.io'
LICENSE = 'BSD-3-Clause'
@@ -47,15 +47,18 @@
'[email protected], [email protected], [email protected]'),
url=URL,
download_url='https://github.com/kymatio/kymatio/releases',
+ project_urls={
+ 'Documentation': 'https://www.kymat.io/codereference.html',
+ 'Source': 'https://github.com/kymatio/kymatio/',
+ 'Tracker': 'https://github.com/kymatio/kymatio/issues',
+ 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'
+ },
classifiers=['Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS',
- 'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
- 'Programming Language :: Python :: 3.4',
- 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
@@ -72,6 +75,7 @@
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
+ python_requires='>=3.6',
license=LICENSE,
packages=find_packages(exclude=('test',)),
install_requires=REQUIREMENTS,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n # Constants\n DISTNAME = 'kymatio'\n DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'\n-URL = 'https://kymatio.github.io'\n+URL = 'https://www.kymat.io'\n LICENSE = 'BSD-3-Clause'\n \n \n@@ -47,15 +47,18 @@\n '[email protected], [email protected], [email protected]'),\n url=URL,\n download_url='https://github.com/kymatio/kymatio/releases',\n+ project_urls={\n+ 'Documentation': 'https://www.kymat.io/codereference.html',\n+ 'Source': 'https://github.com/kymatio/kymatio/',\n+ 'Tracker': 'https://github.com/kymatio/kymatio/issues',\n+ 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'\n+ },\n classifiers=['Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n- 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n- 'Programming Language :: Python :: 3.4',\n- 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n@@ -72,6 +75,7 @@\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n+ python_requires='>=3.6',\n license=LICENSE,\n packages=find_packages(exclude=('test',)),\n install_requires=REQUIREMENTS,\n", "issue": "RELEASE requirements?\nHi,\r\n\r\nWhen doing:\r\n\r\n```\r\nconda install pytorch torchvision -c pytorch\r\npip install -i https://test.pypi.org/simple/ kymatio\r\n```\r\n\r\nthen cloning the corresponding release, and doing `pytest`, I have tons of errors that are evidences that none of the requirements.txt packages are installed. 
Actually, this makes me think we should have a pip for the GPU version(like `pip install kymatio-gpu`)?\r\n\r\nhttps://github.com/kymatio/kymatio/blob/master/requirements.txt\n", "before_files": [{"content": "#!/usr/bin/env python\nimport csv\nimport importlib\nimport os\nimport shutil\nimport sys\nfrom setuptools import setup, find_packages\n\n# Constants\nDISTNAME = 'kymatio'\nDESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'\nURL = 'https://kymatio.github.io'\nLICENSE = 'BSD-3-Clause'\n\n\n# Parse description\nwith open('README.md') as f:\n README = f.read().split('\\n')\n LONG_DESCRIPTION = '\\n'.join([x for x in README if not x[:3]=='[!['])\n\n\n# Parse version.py\nkymatio_version_spec = importlib.util.spec_from_file_location(\n 'kymatio_version', 'kymatio/version.py')\nkymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)\nkymatio_version_spec.loader.exec_module(kymatio_version_module)\nVERSION = kymatio_version_module.version\n\n\n# Parse requirements.txt\nwith open('requirements.txt', 'r') as f:\n REQUIREMENTS = f.read().split('\\n')\n\n\nsetup_info = dict(\n # Metadata\n name=DISTNAME,\n version=VERSION,\n author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '\n 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '\n 'Louis Thiry, Vincent Lostanlen, Joakim And\u00e9n, '\n 'Tom\u00e1s Angles, Gabriel Huang, Roberto Leonarduzzi'),\n author_email=('[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], [email protected], '\n '[email protected], [email protected], [email protected]'),\n url=URL,\n download_url='https://github.com/kymatio/kymatio/releases',\n classifiers=['Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling',\n 'Topic :: Multimedia :: Sound/Audio :: Analysis',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n license=LICENSE,\n packages=find_packages(exclude=('test',)),\n install_requires=REQUIREMENTS,\n zip_safe=True,\n)\n\nsetup(**setup_info)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport csv\nimport importlib\nimport os\nimport shutil\nimport sys\nfrom setuptools import setup, find_packages\n\n# Constants\nDISTNAME = 'kymatio'\nDESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'\nURL = 'https://www.kymat.io'\nLICENSE = 'BSD-3-Clause'\n\n\n# Parse description\nwith open('README.md') as f:\n README = f.read().split('\\n')\n LONG_DESCRIPTION = '\\n'.join([x for x in README if not 
x[:3]=='[!['])\n\n\n# Parse version.py\nkymatio_version_spec = importlib.util.spec_from_file_location(\n 'kymatio_version', 'kymatio/version.py')\nkymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)\nkymatio_version_spec.loader.exec_module(kymatio_version_module)\nVERSION = kymatio_version_module.version\n\n\n# Parse requirements.txt\nwith open('requirements.txt', 'r') as f:\n REQUIREMENTS = f.read().split('\\n')\n\n\nsetup_info = dict(\n # Metadata\n name=DISTNAME,\n version=VERSION,\n author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '\n 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '\n 'Louis Thiry, Vincent Lostanlen, Joakim And\u00e9n, '\n 'Tom\u00e1s Angles, Gabriel Huang, Roberto Leonarduzzi'),\n author_email=('[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], [email protected], '\n '[email protected], [email protected], [email protected]'),\n url=URL,\n download_url='https://github.com/kymatio/kymatio/releases',\n project_urls={\n 'Documentation': 'https://www.kymat.io/codereference.html',\n 'Source': 'https://github.com/kymatio/kymatio/',\n 'Tracker': 'https://github.com/kymatio/kymatio/issues',\n 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'\n },\n classifiers=['Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling',\n 'Topic :: Multimedia :: Sound/Audio :: Analysis',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n python_requires='>=3.6',\n license=LICENSE,\n packages=find_packages(exclude=('test',)),\n install_requires=REQUIREMENTS,\n zip_safe=True,\n)\n\nsetup(**setup_info)\n", "path": "setup.py"}]} | 1,283 | 427 |
gh_patches_debug_29528 | rasdani/github-patches | git_diff | numba__numba-1527 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A reference count handling bug
https://groups.google.com/a/continuum.io/forum/?utm_medium=email&utm_source=footer#!msg/numba-users/8yKajxuVJ10/CqaZXGA_AwAJ
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numba/runtime/atomicops.py`
Content:
```
1 from __future__ import print_function, absolute_import, division
2
3 import re
4 from collections import defaultdict, deque
5
6 from numba.config import MACHINE_BITS
7 from numba import cgutils
8 from llvmlite import ir, binding as llvm
9
10 # Flag to enable debug print in NRT_incref and NRT_decref
11 _debug_print = False
12
13 _word_type = ir.IntType(MACHINE_BITS)
14 _pointer_type = ir.PointerType(ir.IntType(8))
15
16 _meminfo_struct_type = ir.LiteralStructType([
17 _word_type, # size_t refct
18 _pointer_type, # dtor_function dtor
19 _pointer_type, # void *dtor_info
20 _pointer_type, # void *data
21 _word_type, # size_t size
22 ])
23
24
25 incref_decref_ty = ir.FunctionType(ir.VoidType(), [_pointer_type])
26 meminfo_data_ty = ir.FunctionType(_pointer_type, [_pointer_type])
27
28
29 def _define_nrt_meminfo_data(module):
30 """
31 Implement NRT_MemInfo_data_fast in the module. This allows LLVM
32 to inline lookup of the data pointer.
33 """
34 fn = module.get_or_insert_function(meminfo_data_ty,
35 name="NRT_MemInfo_data_fast")
36 builder = ir.IRBuilder(fn.append_basic_block())
37 [ptr] = fn.args
38 struct_ptr = builder.bitcast(ptr, _meminfo_struct_type.as_pointer())
39 data_ptr = builder.load(cgutils.gep(builder, struct_ptr, 0, 3))
40 builder.ret(data_ptr)
41
42
43 def _define_nrt_incref(module, atomic_incr):
44 """
45 Implement NRT_incref in the module
46 """
47 fn_incref = module.get_or_insert_function(incref_decref_ty,
48 name="NRT_incref")
49 builder = ir.IRBuilder(fn_incref.append_basic_block())
50 [ptr] = fn_incref.args
51 is_null = builder.icmp_unsigned("==", ptr, cgutils.get_null_value(ptr.type))
52 with cgutils.if_unlikely(builder, is_null):
53 builder.ret_void()
54
55 if _debug_print:
56 cgutils.printf(builder, "*** NRT_Incref %zu [%p]\n", builder.load(ptr),
57 ptr)
58 builder.call(atomic_incr, [builder.bitcast(ptr, atomic_incr.args[0].type)])
59 builder.ret_void()
60
61
62 def _define_nrt_decref(module, atomic_decr):
63 """
64 Implement NRT_decref in the module
65 """
66 fn_decref = module.get_or_insert_function(incref_decref_ty,
67 name="NRT_decref")
68 calldtor = module.add_function(ir.FunctionType(ir.VoidType(), [_pointer_type]),
69 name="NRT_MemInfo_call_dtor")
70
71 builder = ir.IRBuilder(fn_decref.append_basic_block())
72 [ptr] = fn_decref.args
73 is_null = builder.icmp_unsigned("==", ptr, cgutils.get_null_value(ptr.type))
74 with cgutils.if_unlikely(builder, is_null):
75 builder.ret_void()
76
77 if _debug_print:
78 cgutils.printf(builder, "*** NRT_Decref %zu [%p]\n", builder.load(ptr),
79 ptr)
80 newrefct = builder.call(atomic_decr,
81 [builder.bitcast(ptr, atomic_decr.args[0].type)])
82
83 refct_eq_0 = builder.icmp_unsigned("==", newrefct,
84 ir.Constant(newrefct.type, 0))
85 with cgutils.if_unlikely(builder, refct_eq_0):
86 builder.call(calldtor, [ptr])
87 builder.ret_void()
88
89
90 # Set this to True to measure the overhead of atomic refcounts compared
91 # to non-atomic.
92 _disable_atomicity = 0
93
94
95 def _define_atomic_inc_dec(module, op, ordering):
96 """Define a llvm function for atomic increment/decrement to the given module
97 Argument ``op`` is the operation "add"/"sub". Argument ``ordering`` is
98 the memory ordering. The generated function returns the new value.
99 """
100 ftype = ir.FunctionType(_word_type, [_word_type.as_pointer()])
101 fn_atomic = ir.Function(module, ftype, name="nrt_atomic_{0}".format(op))
102
103 [ptr] = fn_atomic.args
104 bb = fn_atomic.append_basic_block()
105 builder = ir.IRBuilder(bb)
106 ONE = ir.Constant(_word_type, 1)
107 if not _disable_atomicity:
108 oldval = builder.atomic_rmw(op, ptr, ONE, ordering=ordering)
109 # Perform the operation on the old value so that we can pretend returning
110 # the "new" value.
111 res = getattr(builder, op)(oldval, ONE)
112 builder.ret(res)
113 else:
114 oldval = builder.load(ptr)
115 newval = getattr(builder, op)(oldval, ONE)
116 builder.store(newval, ptr)
117 builder.ret(oldval)
118
119 return fn_atomic
120
121
122 def _define_atomic_cas(module, ordering):
123 """Define a llvm function for atomic compare-and-swap.
124 The generated function is a direct wrapper of the LLVM cmpxchg with the
125 difference that the a int indicate success (1) or failure (0) is returned
126 and the last argument is a output pointer for storing the old value.
127
128 Note
129 ----
130 On failure, the generated function behaves like an atomic load. The loaded
131 value is stored to the last argument.
132 """
133 ftype = ir.FunctionType(ir.IntType(32), [_word_type.as_pointer(),
134 _word_type, _word_type,
135 _word_type.as_pointer()])
136 fn_cas = ir.Function(module, ftype, name="nrt_atomic_cas")
137
138 [ptr, cmp, repl, oldptr] = fn_cas.args
139 bb = fn_cas.append_basic_block()
140 builder = ir.IRBuilder(bb)
141 outtup = builder.cmpxchg(ptr, cmp, repl, ordering=ordering)
142 old, ok = cgutils.unpack_tuple(builder, outtup, 2)
143 builder.store(old, oldptr)
144 builder.ret(builder.zext(ok, ftype.return_type))
145
146 return fn_cas
147
148
149 def create_nrt_module(ctx):
150 """
151 Create an IR module defining the LLVM NRT functions.
152 A (IR module, library) tuple is returned.
153 """
154 codegen = ctx.codegen()
155 library = codegen.create_library("nrt")
156
157 # Implement LLVM module with atomic ops
158 ir_mod = library.create_ir_module("nrt_module")
159
160 atomic_inc = _define_atomic_inc_dec(ir_mod, "add", ordering='monotonic')
161 atomic_dec = _define_atomic_inc_dec(ir_mod, "sub", ordering='monotonic')
162 _define_atomic_cas(ir_mod, ordering='monotonic')
163
164 _define_nrt_meminfo_data(ir_mod)
165 _define_nrt_incref(ir_mod, atomic_inc)
166 _define_nrt_decref(ir_mod, atomic_dec)
167
168 return ir_mod, library
169
170
171 def compile_nrt_functions(ctx):
172 """
173 Compile all LLVM NRT functions and return a library containing them.
174 The library is created using the given target context.
175 """
176 ir_mod, library = create_nrt_module(ctx)
177
178 library.add_ir_module(ir_mod)
179 library.finalize()
180
181 return library
182
183
184 _regex_incref = re.compile(r'\s*call void @NRT_incref\((.*)\)')
185 _regex_decref = re.compile(r'\s*call void @NRT_decref\((.*)\)')
186 _regex_bb = re.compile(r'([-a-zA-Z$._][-a-zA-Z$._0-9]*:)|^define')
187
188
189 def remove_redundant_nrt_refct(ll_module):
190 """
191 Remove redundant reference count operations from the
192 `llvmlite.binding.ModuleRef`. This parses the ll_module as a string and
193 line by line to remove the unnecessary nrt refct pairs within each block.
194 Decref calls are moved after the last incref call in the block to avoid
195 temporarily decref'ing to zero (which can happen due to hidden decref from
196 alias).
197 """
198 # Note: As soon as we have better utility in analyzing materialized LLVM
199 # module in llvmlite, we can redo this without so much string
200 # processing.
201
202 def _extract_functions(module):
203 cur = []
204 for line in str(module).splitlines():
205 if line.startswith('define'):
206 # start of function
207 assert not cur
208 cur.append(line)
209 elif line.startswith('}'):
210 # end of function
211 assert cur
212 cur.append(line)
213 yield True, cur
214 cur = []
215 elif cur:
216 cur.append(line)
217 else:
218 yield False, [line]
219
220 def _process_function(func_lines):
221 out = []
222 for is_bb, bb_lines in _extract_basic_blocks(func_lines):
223 if is_bb and bb_lines:
224 bb_lines = _process_basic_block(bb_lines)
225 out += bb_lines
226 return out
227
228 def _extract_basic_blocks(func_lines):
229 assert func_lines[0].startswith('define')
230 assert func_lines[-1].startswith('}')
231 yield False, [func_lines[0]]
232
233 cur = []
234 for ln in func_lines[1:-1]:
235 m = _regex_bb.match(ln)
236 if m is not None:
237 # line is a basic block separator
238 yield True, cur
239 cur = []
240 yield False, [ln]
241 elif ln:
242 cur.append(ln)
243
244 yield True, cur
245 yield False, [func_lines[-1]]
246
247 def _process_basic_block(bb_lines):
248 bb_lines = _move_decref_after_all_increfs(bb_lines)
249 bb_lines = _prune_redundant_refct_ops(bb_lines)
250 return bb_lines
251
252 def _examine_refct_op(bb_lines):
253 for num, ln in enumerate(bb_lines):
254 m = _regex_incref.match(ln)
255 if m is not None:
256 yield num, m.group(1), None
257 continue
258
259 m = _regex_decref.match(ln)
260 if m is not None:
261 yield num, None, m.group(1)
262 continue
263
264 yield ln, None, None
265
266 def _prune_redundant_refct_ops(bb_lines):
267 incref_map = defaultdict(deque)
268 decref_map = defaultdict(deque)
269 for num, incref_var, decref_var in _examine_refct_op(bb_lines):
270 assert not (incref_var and decref_var)
271 if incref_var:
272 incref_map[incref_var].append(num)
273 elif decref_var:
274 decref_map[decref_var].append(num)
275
276 to_remove = set()
277 for var, decops in decref_map.items():
278 incops = incref_map[var]
279 ct = min(len(incops), len(decops))
280 for _ in range(ct):
281 to_remove.add(incops.pop())
282 to_remove.add(decops.popleft())
283
284 return [ln for num, ln in enumerate(bb_lines)
285 if num not in to_remove]
286
287 def _move_decref_after_all_increfs(bb_lines):
288 # find last incref
289 last_pos = 0
290 for pos, ln in enumerate(bb_lines):
291 if _regex_incref.match(ln) is not None:
292 last_pos = pos + 1
293
294 # find decrefs before last_pos
295 decrefs = []
296 head = []
297 for ln in bb_lines[:last_pos]:
298 if _regex_decref.match(ln) is not None:
299 decrefs.append(ln)
300 else:
301 head.append(ln)
302
303 # insert decrefs at last_pos
304 return head + decrefs + bb_lines[last_pos:]
305
306 # Early escape if NRT_incref is not used
307 try:
308 ll_module.get_function('NRT_incref')
309 except NameError:
310 return ll_module
311
312 processed = []
313
314 for is_func, lines in _extract_functions(ll_module):
315 if is_func:
316 lines = _process_function(lines)
317
318 processed += lines
319
320 newll = '\n'.join(processed)
321 return llvm.parse_assembly(newll)
322
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numba/runtime/atomicops.py b/numba/runtime/atomicops.py
--- a/numba/runtime/atomicops.py
+++ b/numba/runtime/atomicops.py
@@ -245,7 +245,7 @@
yield False, [func_lines[-1]]
def _process_basic_block(bb_lines):
- bb_lines = _move_decref_after_all_increfs(bb_lines)
+ bb_lines = _move_and_group_decref_after_all_increfs(bb_lines)
bb_lines = _prune_redundant_refct_ops(bb_lines)
return bb_lines
@@ -284,12 +284,20 @@
return [ln for num, ln in enumerate(bb_lines)
if num not in to_remove]
- def _move_decref_after_all_increfs(bb_lines):
+ def _move_and_group_decref_after_all_increfs(bb_lines):
# find last incref
- last_pos = 0
+ last_incref_pos = 0
for pos, ln in enumerate(bb_lines):
if _regex_incref.match(ln) is not None:
- last_pos = pos + 1
+ last_incref_pos = pos + 1
+
+ # find last decref
+ last_decref_pos = 0
+ for pos, ln in enumerate(bb_lines):
+ if _regex_decref.match(ln) is not None:
+ last_decref_pos = pos + 1
+
+ last_pos = max(last_incref_pos, last_decref_pos)
# find decrefs before last_pos
decrefs = []
@@ -303,6 +311,7 @@
# insert decrefs at last_pos
return head + decrefs + bb_lines[last_pos:]
+
# Early escape if NRT_incref is not used
try:
ll_module.get_function('NRT_incref')
| {"golden_diff": "diff --git a/numba/runtime/atomicops.py b/numba/runtime/atomicops.py\n--- a/numba/runtime/atomicops.py\n+++ b/numba/runtime/atomicops.py\n@@ -245,7 +245,7 @@\n yield False, [func_lines[-1]]\n \n def _process_basic_block(bb_lines):\n- bb_lines = _move_decref_after_all_increfs(bb_lines)\n+ bb_lines = _move_and_group_decref_after_all_increfs(bb_lines)\n bb_lines = _prune_redundant_refct_ops(bb_lines)\n return bb_lines\n \n@@ -284,12 +284,20 @@\n return [ln for num, ln in enumerate(bb_lines)\n if num not in to_remove]\n \n- def _move_decref_after_all_increfs(bb_lines):\n+ def _move_and_group_decref_after_all_increfs(bb_lines):\n # find last incref\n- last_pos = 0\n+ last_incref_pos = 0\n for pos, ln in enumerate(bb_lines):\n if _regex_incref.match(ln) is not None:\n- last_pos = pos + 1\n+ last_incref_pos = pos + 1\n+\n+ # find last decref\n+ last_decref_pos = 0\n+ for pos, ln in enumerate(bb_lines):\n+ if _regex_decref.match(ln) is not None:\n+ last_decref_pos = pos + 1\n+\n+ last_pos = max(last_incref_pos, last_decref_pos)\n \n # find decrefs before last_pos\n decrefs = []\n@@ -303,6 +311,7 @@\n # insert decrefs at last_pos\n return head + decrefs + bb_lines[last_pos:]\n \n+\n # Early escape if NRT_incref is not used\n try:\n ll_module.get_function('NRT_incref')\n", "issue": "A reference count handling bug\nhttps://groups.google.com/a/continuum.io/forum/?utm_medium=email&utm_source=footer#!msg/numba-users/8yKajxuVJ10/CqaZXGA_AwAJ\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import, division\n\nimport re\nfrom collections import defaultdict, deque\n\nfrom numba.config import MACHINE_BITS\nfrom numba import cgutils\nfrom llvmlite import ir, binding as llvm\n\n# Flag to enable debug print in NRT_incref and NRT_decref\n_debug_print = False\n\n_word_type = ir.IntType(MACHINE_BITS)\n_pointer_type = ir.PointerType(ir.IntType(8))\n\n_meminfo_struct_type = ir.LiteralStructType([\n _word_type, # size_t refct\n _pointer_type, # dtor_function dtor\n _pointer_type, # void *dtor_info\n _pointer_type, # void *data\n _word_type, # size_t size\n ])\n\n\nincref_decref_ty = ir.FunctionType(ir.VoidType(), [_pointer_type])\nmeminfo_data_ty = ir.FunctionType(_pointer_type, [_pointer_type])\n\n\ndef _define_nrt_meminfo_data(module):\n \"\"\"\n Implement NRT_MemInfo_data_fast in the module. 
This allows LLVM\n to inline lookup of the data pointer.\n \"\"\"\n fn = module.get_or_insert_function(meminfo_data_ty,\n name=\"NRT_MemInfo_data_fast\")\n builder = ir.IRBuilder(fn.append_basic_block())\n [ptr] = fn.args\n struct_ptr = builder.bitcast(ptr, _meminfo_struct_type.as_pointer())\n data_ptr = builder.load(cgutils.gep(builder, struct_ptr, 0, 3))\n builder.ret(data_ptr)\n\n\ndef _define_nrt_incref(module, atomic_incr):\n \"\"\"\n Implement NRT_incref in the module\n \"\"\"\n fn_incref = module.get_or_insert_function(incref_decref_ty,\n name=\"NRT_incref\")\n builder = ir.IRBuilder(fn_incref.append_basic_block())\n [ptr] = fn_incref.args\n is_null = builder.icmp_unsigned(\"==\", ptr, cgutils.get_null_value(ptr.type))\n with cgutils.if_unlikely(builder, is_null):\n builder.ret_void()\n\n if _debug_print:\n cgutils.printf(builder, \"*** NRT_Incref %zu [%p]\\n\", builder.load(ptr),\n ptr)\n builder.call(atomic_incr, [builder.bitcast(ptr, atomic_incr.args[0].type)])\n builder.ret_void()\n\n\ndef _define_nrt_decref(module, atomic_decr):\n \"\"\"\n Implement NRT_decref in the module\n \"\"\"\n fn_decref = module.get_or_insert_function(incref_decref_ty,\n name=\"NRT_decref\")\n calldtor = module.add_function(ir.FunctionType(ir.VoidType(), [_pointer_type]),\n name=\"NRT_MemInfo_call_dtor\")\n\n builder = ir.IRBuilder(fn_decref.append_basic_block())\n [ptr] = fn_decref.args\n is_null = builder.icmp_unsigned(\"==\", ptr, cgutils.get_null_value(ptr.type))\n with cgutils.if_unlikely(builder, is_null):\n builder.ret_void()\n\n if _debug_print:\n cgutils.printf(builder, \"*** NRT_Decref %zu [%p]\\n\", builder.load(ptr),\n ptr)\n newrefct = builder.call(atomic_decr,\n [builder.bitcast(ptr, atomic_decr.args[0].type)])\n\n refct_eq_0 = builder.icmp_unsigned(\"==\", newrefct,\n ir.Constant(newrefct.type, 0))\n with cgutils.if_unlikely(builder, refct_eq_0):\n builder.call(calldtor, [ptr])\n builder.ret_void()\n\n\n# Set this to True to measure the overhead of atomic refcounts compared\n# to non-atomic.\n_disable_atomicity = 0\n\n\ndef _define_atomic_inc_dec(module, op, ordering):\n \"\"\"Define a llvm function for atomic increment/decrement to the given module\n Argument ``op`` is the operation \"add\"/\"sub\". Argument ``ordering`` is\n the memory ordering. The generated function returns the new value.\n \"\"\"\n ftype = ir.FunctionType(_word_type, [_word_type.as_pointer()])\n fn_atomic = ir.Function(module, ftype, name=\"nrt_atomic_{0}\".format(op))\n\n [ptr] = fn_atomic.args\n bb = fn_atomic.append_basic_block()\n builder = ir.IRBuilder(bb)\n ONE = ir.Constant(_word_type, 1)\n if not _disable_atomicity:\n oldval = builder.atomic_rmw(op, ptr, ONE, ordering=ordering)\n # Perform the operation on the old value so that we can pretend returning\n # the \"new\" value.\n res = getattr(builder, op)(oldval, ONE)\n builder.ret(res)\n else:\n oldval = builder.load(ptr)\n newval = getattr(builder, op)(oldval, ONE)\n builder.store(newval, ptr)\n builder.ret(oldval)\n\n return fn_atomic\n\n\ndef _define_atomic_cas(module, ordering):\n \"\"\"Define a llvm function for atomic compare-and-swap.\n The generated function is a direct wrapper of the LLVM cmpxchg with the\n difference that the a int indicate success (1) or failure (0) is returned\n and the last argument is a output pointer for storing the old value.\n\n Note\n ----\n On failure, the generated function behaves like an atomic load. 
The loaded\n value is stored to the last argument.\n \"\"\"\n ftype = ir.FunctionType(ir.IntType(32), [_word_type.as_pointer(),\n _word_type, _word_type,\n _word_type.as_pointer()])\n fn_cas = ir.Function(module, ftype, name=\"nrt_atomic_cas\")\n\n [ptr, cmp, repl, oldptr] = fn_cas.args\n bb = fn_cas.append_basic_block()\n builder = ir.IRBuilder(bb)\n outtup = builder.cmpxchg(ptr, cmp, repl, ordering=ordering)\n old, ok = cgutils.unpack_tuple(builder, outtup, 2)\n builder.store(old, oldptr)\n builder.ret(builder.zext(ok, ftype.return_type))\n\n return fn_cas\n\n\ndef create_nrt_module(ctx):\n \"\"\"\n Create an IR module defining the LLVM NRT functions.\n A (IR module, library) tuple is returned.\n \"\"\"\n codegen = ctx.codegen()\n library = codegen.create_library(\"nrt\")\n\n # Implement LLVM module with atomic ops\n ir_mod = library.create_ir_module(\"nrt_module\")\n\n atomic_inc = _define_atomic_inc_dec(ir_mod, \"add\", ordering='monotonic')\n atomic_dec = _define_atomic_inc_dec(ir_mod, \"sub\", ordering='monotonic')\n _define_atomic_cas(ir_mod, ordering='monotonic')\n\n _define_nrt_meminfo_data(ir_mod)\n _define_nrt_incref(ir_mod, atomic_inc)\n _define_nrt_decref(ir_mod, atomic_dec)\n\n return ir_mod, library\n\n\ndef compile_nrt_functions(ctx):\n \"\"\"\n Compile all LLVM NRT functions and return a library containing them.\n The library is created using the given target context.\n \"\"\"\n ir_mod, library = create_nrt_module(ctx)\n\n library.add_ir_module(ir_mod)\n library.finalize()\n\n return library\n\n\n_regex_incref = re.compile(r'\\s*call void @NRT_incref\\((.*)\\)')\n_regex_decref = re.compile(r'\\s*call void @NRT_decref\\((.*)\\)')\n_regex_bb = re.compile(r'([-a-zA-Z$._][-a-zA-Z$._0-9]*:)|^define')\n\n\ndef remove_redundant_nrt_refct(ll_module):\n \"\"\"\n Remove redundant reference count operations from the\n `llvmlite.binding.ModuleRef`. 
This parses the ll_module as a string and\n line by line to remove the unnecessary nrt refct pairs within each block.\n Decref calls are moved after the last incref call in the block to avoid\n temporarily decref'ing to zero (which can happen due to hidden decref from\n alias).\n \"\"\"\n # Note: As soon as we have better utility in analyzing materialized LLVM\n # module in llvmlite, we can redo this without so much string\n # processing.\n\n def _extract_functions(module):\n cur = []\n for line in str(module).splitlines():\n if line.startswith('define'):\n # start of function\n assert not cur\n cur.append(line)\n elif line.startswith('}'):\n # end of function\n assert cur\n cur.append(line)\n yield True, cur\n cur = []\n elif cur:\n cur.append(line)\n else:\n yield False, [line]\n\n def _process_function(func_lines):\n out = []\n for is_bb, bb_lines in _extract_basic_blocks(func_lines):\n if is_bb and bb_lines:\n bb_lines = _process_basic_block(bb_lines)\n out += bb_lines\n return out\n\n def _extract_basic_blocks(func_lines):\n assert func_lines[0].startswith('define')\n assert func_lines[-1].startswith('}')\n yield False, [func_lines[0]]\n\n cur = []\n for ln in func_lines[1:-1]:\n m = _regex_bb.match(ln)\n if m is not None:\n # line is a basic block separator\n yield True, cur\n cur = []\n yield False, [ln]\n elif ln:\n cur.append(ln)\n\n yield True, cur\n yield False, [func_lines[-1]]\n\n def _process_basic_block(bb_lines):\n bb_lines = _move_decref_after_all_increfs(bb_lines)\n bb_lines = _prune_redundant_refct_ops(bb_lines)\n return bb_lines\n\n def _examine_refct_op(bb_lines):\n for num, ln in enumerate(bb_lines):\n m = _regex_incref.match(ln)\n if m is not None:\n yield num, m.group(1), None\n continue\n\n m = _regex_decref.match(ln)\n if m is not None:\n yield num, None, m.group(1)\n continue\n\n yield ln, None, None\n\n def _prune_redundant_refct_ops(bb_lines):\n incref_map = defaultdict(deque)\n decref_map = defaultdict(deque)\n for num, incref_var, decref_var in _examine_refct_op(bb_lines):\n assert not (incref_var and decref_var)\n if incref_var:\n incref_map[incref_var].append(num)\n elif decref_var:\n decref_map[decref_var].append(num)\n\n to_remove = set()\n for var, decops in decref_map.items():\n incops = incref_map[var]\n ct = min(len(incops), len(decops))\n for _ in range(ct):\n to_remove.add(incops.pop())\n to_remove.add(decops.popleft())\n\n return [ln for num, ln in enumerate(bb_lines)\n if num not in to_remove]\n\n def _move_decref_after_all_increfs(bb_lines):\n # find last incref\n last_pos = 0\n for pos, ln in enumerate(bb_lines):\n if _regex_incref.match(ln) is not None:\n last_pos = pos + 1\n\n # find decrefs before last_pos\n decrefs = []\n head = []\n for ln in bb_lines[:last_pos]:\n if _regex_decref.match(ln) is not None:\n decrefs.append(ln)\n else:\n head.append(ln)\n\n # insert decrefs at last_pos\n return head + decrefs + bb_lines[last_pos:]\n\n # Early escape if NRT_incref is not used\n try:\n ll_module.get_function('NRT_incref')\n except NameError:\n return ll_module\n\n processed = []\n\n for is_func, lines in _extract_functions(ll_module):\n if is_func:\n lines = _process_function(lines)\n\n processed += lines\n\n newll = '\\n'.join(processed)\n return llvm.parse_assembly(newll)\n", "path": "numba/runtime/atomicops.py"}], "after_files": [{"content": "from __future__ import print_function, absolute_import, division\n\nimport re\nfrom collections import defaultdict, deque\n\nfrom numba.config import MACHINE_BITS\nfrom numba import cgutils\nfrom 
llvmlite import ir, binding as llvm\n\n# Flag to enable debug print in NRT_incref and NRT_decref\n_debug_print = False\n\n_word_type = ir.IntType(MACHINE_BITS)\n_pointer_type = ir.PointerType(ir.IntType(8))\n\n_meminfo_struct_type = ir.LiteralStructType([\n _word_type, # size_t refct\n _pointer_type, # dtor_function dtor\n _pointer_type, # void *dtor_info\n _pointer_type, # void *data\n _word_type, # size_t size\n ])\n\n\nincref_decref_ty = ir.FunctionType(ir.VoidType(), [_pointer_type])\nmeminfo_data_ty = ir.FunctionType(_pointer_type, [_pointer_type])\n\n\ndef _define_nrt_meminfo_data(module):\n \"\"\"\n Implement NRT_MemInfo_data_fast in the module. This allows LLVM\n to inline lookup of the data pointer.\n \"\"\"\n fn = module.get_or_insert_function(meminfo_data_ty,\n name=\"NRT_MemInfo_data_fast\")\n builder = ir.IRBuilder(fn.append_basic_block())\n [ptr] = fn.args\n struct_ptr = builder.bitcast(ptr, _meminfo_struct_type.as_pointer())\n data_ptr = builder.load(cgutils.gep(builder, struct_ptr, 0, 3))\n builder.ret(data_ptr)\n\n\ndef _define_nrt_incref(module, atomic_incr):\n \"\"\"\n Implement NRT_incref in the module\n \"\"\"\n fn_incref = module.get_or_insert_function(incref_decref_ty,\n name=\"NRT_incref\")\n builder = ir.IRBuilder(fn_incref.append_basic_block())\n [ptr] = fn_incref.args\n is_null = builder.icmp_unsigned(\"==\", ptr, cgutils.get_null_value(ptr.type))\n with cgutils.if_unlikely(builder, is_null):\n builder.ret_void()\n\n if _debug_print:\n cgutils.printf(builder, \"*** NRT_Incref %zu [%p]\\n\", builder.load(ptr),\n ptr)\n builder.call(atomic_incr, [builder.bitcast(ptr, atomic_incr.args[0].type)])\n builder.ret_void()\n\n\ndef _define_nrt_decref(module, atomic_decr):\n \"\"\"\n Implement NRT_decref in the module\n \"\"\"\n fn_decref = module.get_or_insert_function(incref_decref_ty,\n name=\"NRT_decref\")\n calldtor = module.add_function(ir.FunctionType(ir.VoidType(), [_pointer_type]),\n name=\"NRT_MemInfo_call_dtor\")\n\n builder = ir.IRBuilder(fn_decref.append_basic_block())\n [ptr] = fn_decref.args\n is_null = builder.icmp_unsigned(\"==\", ptr, cgutils.get_null_value(ptr.type))\n with cgutils.if_unlikely(builder, is_null):\n builder.ret_void()\n\n if _debug_print:\n cgutils.printf(builder, \"*** NRT_Decref %zu [%p]\\n\", builder.load(ptr),\n ptr)\n newrefct = builder.call(atomic_decr,\n [builder.bitcast(ptr, atomic_decr.args[0].type)])\n\n refct_eq_0 = builder.icmp_unsigned(\"==\", newrefct,\n ir.Constant(newrefct.type, 0))\n with cgutils.if_unlikely(builder, refct_eq_0):\n builder.call(calldtor, [ptr])\n builder.ret_void()\n\n\n# Set this to True to measure the overhead of atomic refcounts compared\n# to non-atomic.\n_disable_atomicity = 0\n\n\ndef _define_atomic_inc_dec(module, op, ordering):\n \"\"\"Define a llvm function for atomic increment/decrement to the given module\n Argument ``op`` is the operation \"add\"/\"sub\". Argument ``ordering`` is\n the memory ordering. 
The generated function returns the new value.\n \"\"\"\n ftype = ir.FunctionType(_word_type, [_word_type.as_pointer()])\n fn_atomic = ir.Function(module, ftype, name=\"nrt_atomic_{0}\".format(op))\n\n [ptr] = fn_atomic.args\n bb = fn_atomic.append_basic_block()\n builder = ir.IRBuilder(bb)\n ONE = ir.Constant(_word_type, 1)\n if not _disable_atomicity:\n oldval = builder.atomic_rmw(op, ptr, ONE, ordering=ordering)\n # Perform the operation on the old value so that we can pretend returning\n # the \"new\" value.\n res = getattr(builder, op)(oldval, ONE)\n builder.ret(res)\n else:\n oldval = builder.load(ptr)\n newval = getattr(builder, op)(oldval, ONE)\n builder.store(newval, ptr)\n builder.ret(oldval)\n\n return fn_atomic\n\n\ndef _define_atomic_cas(module, ordering):\n \"\"\"Define a llvm function for atomic compare-and-swap.\n The generated function is a direct wrapper of the LLVM cmpxchg with the\n difference that the a int indicate success (1) or failure (0) is returned\n and the last argument is a output pointer for storing the old value.\n\n Note\n ----\n On failure, the generated function behaves like an atomic load. The loaded\n value is stored to the last argument.\n \"\"\"\n ftype = ir.FunctionType(ir.IntType(32), [_word_type.as_pointer(),\n _word_type, _word_type,\n _word_type.as_pointer()])\n fn_cas = ir.Function(module, ftype, name=\"nrt_atomic_cas\")\n\n [ptr, cmp, repl, oldptr] = fn_cas.args\n bb = fn_cas.append_basic_block()\n builder = ir.IRBuilder(bb)\n outtup = builder.cmpxchg(ptr, cmp, repl, ordering=ordering)\n old, ok = cgutils.unpack_tuple(builder, outtup, 2)\n builder.store(old, oldptr)\n builder.ret(builder.zext(ok, ftype.return_type))\n\n return fn_cas\n\n\ndef create_nrt_module(ctx):\n \"\"\"\n Create an IR module defining the LLVM NRT functions.\n A (IR module, library) tuple is returned.\n \"\"\"\n codegen = ctx.codegen()\n library = codegen.create_library(\"nrt\")\n\n # Implement LLVM module with atomic ops\n ir_mod = library.create_ir_module(\"nrt_module\")\n\n atomic_inc = _define_atomic_inc_dec(ir_mod, \"add\", ordering='monotonic')\n atomic_dec = _define_atomic_inc_dec(ir_mod, \"sub\", ordering='monotonic')\n _define_atomic_cas(ir_mod, ordering='monotonic')\n\n _define_nrt_meminfo_data(ir_mod)\n _define_nrt_incref(ir_mod, atomic_inc)\n _define_nrt_decref(ir_mod, atomic_dec)\n\n return ir_mod, library\n\n\ndef compile_nrt_functions(ctx):\n \"\"\"\n Compile all LLVM NRT functions and return a library containing them.\n The library is created using the given target context.\n \"\"\"\n ir_mod, library = create_nrt_module(ctx)\n\n library.add_ir_module(ir_mod)\n library.finalize()\n\n return library\n\n\n_regex_incref = re.compile(r'\\s*call void @NRT_incref\\((.*)\\)')\n_regex_decref = re.compile(r'\\s*call void @NRT_decref\\((.*)\\)')\n_regex_bb = re.compile(r'([-a-zA-Z$._][-a-zA-Z$._0-9]*:)|^define')\n\n\ndef remove_redundant_nrt_refct(ll_module):\n \"\"\"\n Remove redundant reference count operations from the\n `llvmlite.binding.ModuleRef`. 
This parses the ll_module as a string and\n line by line to remove the unnecessary nrt refct pairs within each block.\n Decref calls are moved after the last incref call in the block to avoid\n temporarily decref'ing to zero (which can happen due to hidden decref from\n alias).\n \"\"\"\n # Note: As soon as we have better utility in analyzing materialized LLVM\n # module in llvmlite, we can redo this without so much string\n # processing.\n\n def _extract_functions(module):\n cur = []\n for line in str(module).splitlines():\n if line.startswith('define'):\n # start of function\n assert not cur\n cur.append(line)\n elif line.startswith('}'):\n # end of function\n assert cur\n cur.append(line)\n yield True, cur\n cur = []\n elif cur:\n cur.append(line)\n else:\n yield False, [line]\n\n def _process_function(func_lines):\n out = []\n for is_bb, bb_lines in _extract_basic_blocks(func_lines):\n if is_bb and bb_lines:\n bb_lines = _process_basic_block(bb_lines)\n out += bb_lines\n return out\n\n def _extract_basic_blocks(func_lines):\n assert func_lines[0].startswith('define')\n assert func_lines[-1].startswith('}')\n yield False, [func_lines[0]]\n\n cur = []\n for ln in func_lines[1:-1]:\n m = _regex_bb.match(ln)\n if m is not None:\n # line is a basic block separator\n yield True, cur\n cur = []\n yield False, [ln]\n elif ln:\n cur.append(ln)\n\n yield True, cur\n yield False, [func_lines[-1]]\n\n def _process_basic_block(bb_lines):\n bb_lines = _move_and_group_decref_after_all_increfs(bb_lines)\n bb_lines = _prune_redundant_refct_ops(bb_lines)\n return bb_lines\n\n def _examine_refct_op(bb_lines):\n for num, ln in enumerate(bb_lines):\n m = _regex_incref.match(ln)\n if m is not None:\n yield num, m.group(1), None\n continue\n\n m = _regex_decref.match(ln)\n if m is not None:\n yield num, None, m.group(1)\n continue\n\n yield ln, None, None\n\n def _prune_redundant_refct_ops(bb_lines):\n incref_map = defaultdict(deque)\n decref_map = defaultdict(deque)\n for num, incref_var, decref_var in _examine_refct_op(bb_lines):\n assert not (incref_var and decref_var)\n if incref_var:\n incref_map[incref_var].append(num)\n elif decref_var:\n decref_map[decref_var].append(num)\n\n to_remove = set()\n for var, decops in decref_map.items():\n incops = incref_map[var]\n ct = min(len(incops), len(decops))\n for _ in range(ct):\n to_remove.add(incops.pop())\n to_remove.add(decops.popleft())\n\n return [ln for num, ln in enumerate(bb_lines)\n if num not in to_remove]\n\n def _move_and_group_decref_after_all_increfs(bb_lines):\n # find last incref\n last_incref_pos = 0\n for pos, ln in enumerate(bb_lines):\n if _regex_incref.match(ln) is not None:\n last_incref_pos = pos + 1\n\n # find last decref\n last_decref_pos = 0\n for pos, ln in enumerate(bb_lines):\n if _regex_decref.match(ln) is not None:\n last_decref_pos = pos + 1\n\n last_pos = max(last_incref_pos, last_decref_pos)\n\n # find decrefs before last_pos\n decrefs = []\n head = []\n for ln in bb_lines[:last_pos]:\n if _regex_decref.match(ln) is not None:\n decrefs.append(ln)\n else:\n head.append(ln)\n\n # insert decrefs at last_pos\n return head + decrefs + bb_lines[last_pos:]\n\n\n # Early escape if NRT_incref is not used\n try:\n ll_module.get_function('NRT_incref')\n except NameError:\n return ll_module\n\n processed = []\n\n for is_func, lines in _extract_functions(ll_module):\n if is_func:\n lines = _process_function(lines)\n\n processed += lines\n\n newll = '\\n'.join(processed)\n return llvm.parse_assembly(newll)\n", "path": 
"numba/runtime/atomicops.py"}]} | 3,871 | 442 |
gh_patches_debug_6618 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-1687 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
REPL client help is missing some commands
The help for the REPL client is missing some commands from `help`; they show up instead as the literal placeholder `client.{c[0]}`.
```
pymodbus.console tcp
❯ pymodbus.console tcp
----------------------------------------------------------------------------
__________ _____ .___ __________ .__
\______ \___.__. / \ ____ __| _/ \______ \ ____ ______ | |
| ___< | |/ \ / \ / _ \ / __ | | _// __ \\\____ \| |
| | \___ / Y ( <_> ) /_/ | | | \ ___/| |_> > |__
|____| / ____\____|__ /\____/\____ | /\ |____|_ /\___ > __/|____/
\/ \/ \/ \/ \/ \/|__|
v1.3.0 - 3.5.0dev
----------------------------------------------------------------------------
> help
Available commands:
<...>
client.{c[0]}
```
This was caught by a `ruff` rule.
> RUF011 Dictionary comprehension uses static key: `"client.{c[0]}"`
--- END ISSUE ---
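The `ruff` finding quoted in the issue is the entire bug: without the `f` prefix the braces are never interpolated, so every iteration of the comprehension writes the same literal key. A standalone sketch of the difference (toy member pairs, not the real client introspection):

```python
# Toy stand-ins for the (name, callable) pairs that inspect.getmembers() yields.
members = [("connect", print), ("close", print)]

broken = {"client.{c[0]}": c[1] for c in members}   # static key -> RUF011
fixed = {f"client.{c[0]}": c[1] for c in members}   # interpolated key

print(list(broken))  # ['client.{c[0]}'] -- a single literal entry survives
print(list(fixed))   # ['client.connect', 'client.close']
```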
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymodbus/repl/client/helper.py`
Content:
```
1 """Helper Module for REPL actions."""
2 import inspect
3
4 # pylint: disable=missing-type-doc
5 import json
6 from collections import OrderedDict
7 from typing import Any, Dict, List, Union
8
9 import pygments
10 from prompt_toolkit import print_formatted_text
11 from prompt_toolkit.formatted_text import HTML, PygmentsTokens
12 from pygments.lexers.data import JsonLexer
13
14 from pymodbus.payload import BinaryPayloadDecoder, Endian
15
16
17 predicate = inspect.isfunction
18 argspec = inspect.signature
19
20
21 FORMATTERS = {
22 "int8": "decode_8bit_int",
23 "int16": "decode_16bit_int",
24 "int32": "decode_32bit_int",
25 "int64": "decode_64bit_int",
26 "uint8": "decode_8bit_uint",
27 "uint16": "decode_16bit_uint",
28 "uint32": "decode_32bit_uint",
29 "uint64": "decode_64bit_int",
30 "float16": "decode_16bit_float",
31 "float32": "decode_32bit_float",
32 "float64": "decode_64bit_float",
33 }
34
35
36 DEFAULT_KWARGS = {"slave": "Slave address"}
37
38 OTHER_COMMANDS = {
39 "result.raw": "Show RAW Result",
40 "result.decode": "Decode register response to known formats",
41 }
42 EXCLUDE = ["execute", "recv", "send", "trace", "set_debug"]
43 CLIENT_METHODS = [
44 "connect",
45 "close",
46 "idle_time",
47 "is_socket_open",
48 "get_port",
49 "set_port",
50 "get_stopbits",
51 "set_stopbits",
52 "get_bytesize",
53 "set_bytesize",
54 "get_parity",
55 "set_parity",
56 "get_baudrate",
57 "set_baudrate",
58 "get_timeout",
59 "set_timeout",
60 "get_serial_settings",
61 ]
62 CLIENT_ATTRIBUTES: List[str] = []
63
64
65 class Command:
66 """Class representing Commands to be consumed by Completer."""
67
68 def __init__(self, name, signature, doc, slave=False):
69 """Initialize.
70
71 :param name: Name of the command
72 :param signature: inspect object
73 :param doc: Doc string for the command
74 :param slave: Use slave as additional argument in the command .
75 """
76 self.name = name
77 self.doc = doc.split("\n") if doc else " ".join(name.split("_"))
78 self.help_text = self._create_help()
79 self.param_help = self._create_arg_help()
80 if signature:
81 self._params = signature.parameters
82 self.args = self.create_completion()
83 else:
84 self._params = ""
85
86 if self.name.startswith("client.") and slave:
87 self.args.update(**DEFAULT_KWARGS)
88
89 def _create_help(self):
90 """Create help."""
91 doc = filter(lambda d: d, self.doc)
92 cmd_help = list(
93 filter(
94 lambda x: not x.startswith(":param") and not x.startswith(":return"),
95 doc,
96 )
97 )
98 return " ".join(cmd_help).strip()
99
100 def _create_arg_help(self):
101 """Create arg help."""
102 param_dict = {}
103 params = list(filter(lambda d: d.strip().startswith(":param"), self.doc))
104 for param in params:
105 param, param_help = param.split(":param")[1].strip().split(":")
106 param_dict[param] = param_help
107 return param_dict
108
109 def create_completion(self):
110 """Create command completion meta data.
111
112 :return:
113 """
114 words = {}
115
116 def _create(entry, default):
117 if entry not in ["self", "kwargs"]:
118 if isinstance(default, (int, str)):
119 entry += f"={default}"
120 return entry
121 return None
122
123 for arg in self._params.values():
124 if entry := _create(arg.name, arg.default):
125 entry, meta = self.get_meta(entry)
126 words[entry] = meta
127
128 return words
129
130 def get_completion(self):
131 """Get a list of completions.
132
133 :return:
134 """
135 return self.args.keys()
136
137 def get_meta(self, cmd):
138 """Get Meta info of a given command.
139
140 :param cmd: Name of command.
141 :return: Dict containing meta info.
142 """
143 cmd = cmd.strip()
144 cmd = cmd.split("=")[0].strip()
145 return cmd, self.param_help.get(cmd, "")
146
147 def __str__(self):
148 """Return string representation."""
149 if self.doc:
150 return "Command {:>50}{:<20}".format( # pylint: disable=consider-using-f-string
151 self.name, self.doc
152 )
153 return f"Command {self.name}"
154
155
156 def _get_requests(members):
157 """Get requests."""
158 commands = list(
159 filter(
160 lambda x: (
161 x[0] not in EXCLUDE and x[0] not in CLIENT_METHODS and callable(x[1])
162 ),
163 members,
164 )
165 )
166 commands = {
167 f"client.{c[0]}": Command(
168 f"client.{c[0]}", argspec(c[1]), inspect.getdoc(c[1]), slave=False
169 )
170 for c in commands
171 if not c[0].startswith("_")
172 }
173 return commands
174
175
176 def _get_client_methods(members):
177 """Get client methods."""
178 commands = list(
179 filter(lambda x: (x[0] not in EXCLUDE and x[0] in CLIENT_METHODS), members)
180 )
181 commands = {
182 "client.{c[0]}": Command(
183 "client.{c[0]}", argspec(c[1]), inspect.getdoc(c[1]), slave=False
184 )
185 for c in commands
186 if not c[0].startswith("_")
187 }
188 return commands
189
190
191 def _get_client_properties(members):
192 """Get client properties."""
193 global CLIENT_ATTRIBUTES # pylint: disable=global-variable-not-assigned
194 commands = list(filter(lambda x: not callable(x[1]), members))
195 commands = {
196 f"client.{c[0]}": Command(f"client.{c[0]}", None, "Read Only!", slave=False)
197 for c in commands
198 if (not c[0].startswith("_") and isinstance(c[1], (str, int, float)))
199 }
200 CLIENT_ATTRIBUTES.extend(list(commands.keys()))
201 return commands
202
203
204 def get_commands(client):
205 """Retrieve all required methods and attributes.
206
207 Of a client object and convert it to commands.
208
209 :param client: Modbus Client object.
210 :return:
211 """
212 commands = {}
213 members = inspect.getmembers(client)
214 requests = _get_requests(members)
215 client_methods = _get_client_methods(members)
216 client_attr = _get_client_properties(members)
217
218 result_commands = inspect.getmembers(Result, predicate=predicate)
219 result_commands = {
220 f"result.{c[0]}": Command(f"result.{c[0]}", argspec(c[1]), inspect.getdoc(c[1]))
221 for c in result_commands
222 if (not c[0].startswith("_") and c[0] != "print_result")
223 }
224 commands.update(requests)
225 commands.update(client_methods)
226 commands.update(client_attr)
227 commands.update(result_commands)
228 return commands
229
230
231 class Result:
232 """Represent result command."""
233
234 function_code: int = None
235 data: Union[Dict[int, Any], Any] = None
236
237 def __init__(self, result):
238 """Initialize.
239
240 :param result: Response of a modbus command.
241 """
242 if isinstance(result, dict): # Modbus response
243 self.function_code = result.pop("function_code", None)
244 self.data = dict(result)
245 else:
246 self.data = result
247
248 def decode(self, formatters, byte_order="big", word_order="big"):
249 """Decode the register response to known formatters.
250
251 :param formatters: int8/16/32/64, uint8/16/32/64, float32/64
252 :param byte_order: little/big
253 :param word_order: little/big
254 """
255 # Read Holding Registers (3)
256 # Read Input Registers (4)
257 # Read Write Registers (23)
258 if not isinstance(formatters, (list, tuple)):
259 formatters = [formatters]
260
261 if self.function_code not in [3, 4, 23]:
262 print_formatted_text(HTML("<red>Decoder works only for registers!!</red>"))
263 return
264 byte_order = (
265 Endian.Little if byte_order.strip().lower() == "little" else Endian.Big
266 )
267 word_order = (
268 Endian.Little if word_order.strip().lower() == "little" else Endian.Big
269 )
270 decoder = BinaryPayloadDecoder.fromRegisters(
271 self.data.get("registers"), byteorder=byte_order, wordorder=word_order
272 )
273 for formatter in formatters:
274 if not (formatter := FORMATTERS.get(formatter)):
275 print_formatted_text(
276 HTML(f"<red>Invalid Formatter - {formatter}!!</red>")
277 )
278 return
279 decoded = getattr(decoder, formatter)()
280 self.print_result(decoded)
281
282 def raw(self):
283 """Return raw result dict."""
284 self.print_result()
285
286 def _process_dict(self, use_dict):
287 """Process dict."""
288 new_dict = OrderedDict()
289 for k, v_item in use_dict.items():
290 if isinstance(v_item, bytes):
291 v_item = v_item.decode("utf-8")
292 elif isinstance(v_item, dict):
293 v_item = self._process_dict(v_item)
294 elif isinstance(v_item, (list, tuple)):
295 v_item = [
296 v1.decode("utf-8") if isinstance(v1, bytes) else v1 for v1 in v_item
297 ]
298 new_dict[k] = v_item
299 return new_dict
300
301 def print_result(self, data=None):
302 """Print result object pretty.
303
304 :param data: Data to be printed.
305 """
306 data = data or self.data
307 if isinstance(data, dict):
308 data = self._process_dict(data)
309 elif isinstance(data, (list, tuple)):
310 data = [v.decode("utf-8") if isinstance(v, bytes) else v for v in data]
311 elif isinstance(data, bytes):
312 data = data.decode("utf-8")
313 tokens = list(pygments.lex(json.dumps(data, indent=4), lexer=JsonLexer()))
314 print_formatted_text(PygmentsTokens(tokens))
315
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pymodbus/repl/client/helper.py b/pymodbus/repl/client/helper.py
--- a/pymodbus/repl/client/helper.py
+++ b/pymodbus/repl/client/helper.py
@@ -179,8 +179,8 @@
filter(lambda x: (x[0] not in EXCLUDE and x[0] in CLIENT_METHODS), members)
)
commands = {
- "client.{c[0]}": Command(
- "client.{c[0]}", argspec(c[1]), inspect.getdoc(c[1]), slave=False
+ f"client.{c[0]}": Command(
+ f"client.{c[0]}", argspec(c[1]), inspect.getdoc(c[1]), slave=False
)
for c in commands
if not c[0].startswith("_")
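With a patch like the one above applied, the regression is cheap to check from a plain Python session; a rough sketch (assumes a pymodbus 3.x checkout with the REPL extra installed — no live Modbus server is contacted):

```python
from pymodbus.client import ModbusTcpClient
from pymodbus.repl.client.helper import get_commands

# Constructing the client only lets us inspect its members; connect() is never called.
client = ModbusTcpClient("localhost")
commands = get_commands(client)

# With the unpatched helper, literal 'client.{c[0]}' keys leak into the listing.
assert not any("{c[0]}" in name for name in commands), "static keys leaked into help"
print(sorted(commands)[:5])
```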
| {"golden_diff": "diff --git a/pymodbus/repl/client/helper.py b/pymodbus/repl/client/helper.py\n--- a/pymodbus/repl/client/helper.py\n+++ b/pymodbus/repl/client/helper.py\n@@ -179,8 +179,8 @@\n filter(lambda x: (x[0] not in EXCLUDE and x[0] in CLIENT_METHODS), members)\n )\n commands = {\n- \"client.{c[0]}\": Command(\n- \"client.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]), slave=False\n+ f\"client.{c[0]}\": Command(\n+ f\"client.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]), slave=False\n )\n for c in commands\n if not c[0].startswith(\"_\")\n", "issue": "REPL client help is missing some commands\nThe help for the REPL client is missing some commands from `help`, and instead has `client.{c[0]}`.\r\n\r\n```\r\npymodbus.console tcp\r\n\u276f pymodbus.console tcp\r\n\r\n----------------------------------------------------------------------------\r\n__________ _____ .___ __________ .__\r\n\\______ \\___.__. / \\ ____ __| _/ \\______ \\ ____ ______ | |\r\n | ___< | |/ \\ / \\ / _ \\ / __ | | _// __ \\\\\\____ \\| |\r\n | | \\___ / Y ( <_> ) /_/ | | | \\ ___/| |_> > |__\r\n |____| / ____\\____|__ /\\____/\\____ | /\\ |____|_ /\\___ > __/|____/\r\n \\/ \\/ \\/ \\/ \\/ \\/|__|\r\n v1.3.0 - 3.5.0dev\r\n----------------------------------------------------------------------------\r\n\r\n> help\r\nAvailable commands:\r\n<...>\r\nclient.{c[0]}\r\n```\r\n\r\nThis was caught by a `ruff` rule.\r\n> RUF011 Dictionary comprehension uses static key: `\"client.{c[0]}\"`\r\n\n", "before_files": [{"content": "\"\"\"Helper Module for REPL actions.\"\"\"\nimport inspect\n\n# pylint: disable=missing-type-doc\nimport json\nfrom collections import OrderedDict\nfrom typing import Any, Dict, List, Union\n\nimport pygments\nfrom prompt_toolkit import print_formatted_text\nfrom prompt_toolkit.formatted_text import HTML, PygmentsTokens\nfrom pygments.lexers.data import JsonLexer\n\nfrom pymodbus.payload import BinaryPayloadDecoder, Endian\n\n\npredicate = inspect.isfunction\nargspec = inspect.signature\n\n\nFORMATTERS = {\n \"int8\": \"decode_8bit_int\",\n \"int16\": \"decode_16bit_int\",\n \"int32\": \"decode_32bit_int\",\n \"int64\": \"decode_64bit_int\",\n \"uint8\": \"decode_8bit_uint\",\n \"uint16\": \"decode_16bit_uint\",\n \"uint32\": \"decode_32bit_uint\",\n \"uint64\": \"decode_64bit_int\",\n \"float16\": \"decode_16bit_float\",\n \"float32\": \"decode_32bit_float\",\n \"float64\": \"decode_64bit_float\",\n}\n\n\nDEFAULT_KWARGS = {\"slave\": \"Slave address\"}\n\nOTHER_COMMANDS = {\n \"result.raw\": \"Show RAW Result\",\n \"result.decode\": \"Decode register response to known formats\",\n}\nEXCLUDE = [\"execute\", \"recv\", \"send\", \"trace\", \"set_debug\"]\nCLIENT_METHODS = [\n \"connect\",\n \"close\",\n \"idle_time\",\n \"is_socket_open\",\n \"get_port\",\n \"set_port\",\n \"get_stopbits\",\n \"set_stopbits\",\n \"get_bytesize\",\n \"set_bytesize\",\n \"get_parity\",\n \"set_parity\",\n \"get_baudrate\",\n \"set_baudrate\",\n \"get_timeout\",\n \"set_timeout\",\n \"get_serial_settings\",\n]\nCLIENT_ATTRIBUTES: List[str] = []\n\n\nclass Command:\n \"\"\"Class representing Commands to be consumed by Completer.\"\"\"\n\n def __init__(self, name, signature, doc, slave=False):\n \"\"\"Initialize.\n\n :param name: Name of the command\n :param signature: inspect object\n :param doc: Doc string for the command\n :param slave: Use slave as additional argument in the command .\n \"\"\"\n self.name = name\n self.doc = doc.split(\"\\n\") if doc else \" \".join(name.split(\"_\"))\n self.help_text = 
self._create_help()\n self.param_help = self._create_arg_help()\n if signature:\n self._params = signature.parameters\n self.args = self.create_completion()\n else:\n self._params = \"\"\n\n if self.name.startswith(\"client.\") and slave:\n self.args.update(**DEFAULT_KWARGS)\n\n def _create_help(self):\n \"\"\"Create help.\"\"\"\n doc = filter(lambda d: d, self.doc)\n cmd_help = list(\n filter(\n lambda x: not x.startswith(\":param\") and not x.startswith(\":return\"),\n doc,\n )\n )\n return \" \".join(cmd_help).strip()\n\n def _create_arg_help(self):\n \"\"\"Create arg help.\"\"\"\n param_dict = {}\n params = list(filter(lambda d: d.strip().startswith(\":param\"), self.doc))\n for param in params:\n param, param_help = param.split(\":param\")[1].strip().split(\":\")\n param_dict[param] = param_help\n return param_dict\n\n def create_completion(self):\n \"\"\"Create command completion meta data.\n\n :return:\n \"\"\"\n words = {}\n\n def _create(entry, default):\n if entry not in [\"self\", \"kwargs\"]:\n if isinstance(default, (int, str)):\n entry += f\"={default}\"\n return entry\n return None\n\n for arg in self._params.values():\n if entry := _create(arg.name, arg.default):\n entry, meta = self.get_meta(entry)\n words[entry] = meta\n\n return words\n\n def get_completion(self):\n \"\"\"Get a list of completions.\n\n :return:\n \"\"\"\n return self.args.keys()\n\n def get_meta(self, cmd):\n \"\"\"Get Meta info of a given command.\n\n :param cmd: Name of command.\n :return: Dict containing meta info.\n \"\"\"\n cmd = cmd.strip()\n cmd = cmd.split(\"=\")[0].strip()\n return cmd, self.param_help.get(cmd, \"\")\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n if self.doc:\n return \"Command {:>50}{:<20}\".format( # pylint: disable=consider-using-f-string\n self.name, self.doc\n )\n return f\"Command {self.name}\"\n\n\ndef _get_requests(members):\n \"\"\"Get requests.\"\"\"\n commands = list(\n filter(\n lambda x: (\n x[0] not in EXCLUDE and x[0] not in CLIENT_METHODS and callable(x[1])\n ),\n members,\n )\n )\n commands = {\n f\"client.{c[0]}\": Command(\n f\"client.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]), slave=False\n )\n for c in commands\n if not c[0].startswith(\"_\")\n }\n return commands\n\n\ndef _get_client_methods(members):\n \"\"\"Get client methods.\"\"\"\n commands = list(\n filter(lambda x: (x[0] not in EXCLUDE and x[0] in CLIENT_METHODS), members)\n )\n commands = {\n \"client.{c[0]}\": Command(\n \"client.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]), slave=False\n )\n for c in commands\n if not c[0].startswith(\"_\")\n }\n return commands\n\n\ndef _get_client_properties(members):\n \"\"\"Get client properties.\"\"\"\n global CLIENT_ATTRIBUTES # pylint: disable=global-variable-not-assigned\n commands = list(filter(lambda x: not callable(x[1]), members))\n commands = {\n f\"client.{c[0]}\": Command(f\"client.{c[0]}\", None, \"Read Only!\", slave=False)\n for c in commands\n if (not c[0].startswith(\"_\") and isinstance(c[1], (str, int, float)))\n }\n CLIENT_ATTRIBUTES.extend(list(commands.keys()))\n return commands\n\n\ndef get_commands(client):\n \"\"\"Retrieve all required methods and attributes.\n\n Of a client object and convert it to commands.\n\n :param client: Modbus Client object.\n :return:\n \"\"\"\n commands = {}\n members = inspect.getmembers(client)\n requests = _get_requests(members)\n client_methods = _get_client_methods(members)\n client_attr = _get_client_properties(members)\n\n result_commands = inspect.getmembers(Result, 
predicate=predicate)\n result_commands = {\n f\"result.{c[0]}\": Command(f\"result.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]))\n for c in result_commands\n if (not c[0].startswith(\"_\") and c[0] != \"print_result\")\n }\n commands.update(requests)\n commands.update(client_methods)\n commands.update(client_attr)\n commands.update(result_commands)\n return commands\n\n\nclass Result:\n \"\"\"Represent result command.\"\"\"\n\n function_code: int = None\n data: Union[Dict[int, Any], Any] = None\n\n def __init__(self, result):\n \"\"\"Initialize.\n\n :param result: Response of a modbus command.\n \"\"\"\n if isinstance(result, dict): # Modbus response\n self.function_code = result.pop(\"function_code\", None)\n self.data = dict(result)\n else:\n self.data = result\n\n def decode(self, formatters, byte_order=\"big\", word_order=\"big\"):\n \"\"\"Decode the register response to known formatters.\n\n :param formatters: int8/16/32/64, uint8/16/32/64, float32/64\n :param byte_order: little/big\n :param word_order: little/big\n \"\"\"\n # Read Holding Registers (3)\n # Read Input Registers (4)\n # Read Write Registers (23)\n if not isinstance(formatters, (list, tuple)):\n formatters = [formatters]\n\n if self.function_code not in [3, 4, 23]:\n print_formatted_text(HTML(\"<red>Decoder works only for registers!!</red>\"))\n return\n byte_order = (\n Endian.Little if byte_order.strip().lower() == \"little\" else Endian.Big\n )\n word_order = (\n Endian.Little if word_order.strip().lower() == \"little\" else Endian.Big\n )\n decoder = BinaryPayloadDecoder.fromRegisters(\n self.data.get(\"registers\"), byteorder=byte_order, wordorder=word_order\n )\n for formatter in formatters:\n if not (formatter := FORMATTERS.get(formatter)):\n print_formatted_text(\n HTML(f\"<red>Invalid Formatter - {formatter}!!</red>\")\n )\n return\n decoded = getattr(decoder, formatter)()\n self.print_result(decoded)\n\n def raw(self):\n \"\"\"Return raw result dict.\"\"\"\n self.print_result()\n\n def _process_dict(self, use_dict):\n \"\"\"Process dict.\"\"\"\n new_dict = OrderedDict()\n for k, v_item in use_dict.items():\n if isinstance(v_item, bytes):\n v_item = v_item.decode(\"utf-8\")\n elif isinstance(v_item, dict):\n v_item = self._process_dict(v_item)\n elif isinstance(v_item, (list, tuple)):\n v_item = [\n v1.decode(\"utf-8\") if isinstance(v1, bytes) else v1 for v1 in v_item\n ]\n new_dict[k] = v_item\n return new_dict\n\n def print_result(self, data=None):\n \"\"\"Print result object pretty.\n\n :param data: Data to be printed.\n \"\"\"\n data = data or self.data\n if isinstance(data, dict):\n data = self._process_dict(data)\n elif isinstance(data, (list, tuple)):\n data = [v.decode(\"utf-8\") if isinstance(v, bytes) else v for v in data]\n elif isinstance(data, bytes):\n data = data.decode(\"utf-8\")\n tokens = list(pygments.lex(json.dumps(data, indent=4), lexer=JsonLexer()))\n print_formatted_text(PygmentsTokens(tokens))\n", "path": "pymodbus/repl/client/helper.py"}], "after_files": [{"content": "\"\"\"Helper Module for REPL actions.\"\"\"\nimport inspect\n\n# pylint: disable=missing-type-doc\nimport json\nfrom collections import OrderedDict\nfrom typing import Any, Dict, List, Union\n\nimport pygments\nfrom prompt_toolkit import print_formatted_text\nfrom prompt_toolkit.formatted_text import HTML, PygmentsTokens\nfrom pygments.lexers.data import JsonLexer\n\nfrom pymodbus.payload import BinaryPayloadDecoder, Endian\n\n\npredicate = inspect.isfunction\nargspec = inspect.signature\n\n\nFORMATTERS = {\n \"int8\": 
\"decode_8bit_int\",\n \"int16\": \"decode_16bit_int\",\n \"int32\": \"decode_32bit_int\",\n \"int64\": \"decode_64bit_int\",\n \"uint8\": \"decode_8bit_uint\",\n \"uint16\": \"decode_16bit_uint\",\n \"uint32\": \"decode_32bit_uint\",\n \"uint64\": \"decode_64bit_int\",\n \"float16\": \"decode_16bit_float\",\n \"float32\": \"decode_32bit_float\",\n \"float64\": \"decode_64bit_float\",\n}\n\n\nDEFAULT_KWARGS = {\"slave\": \"Slave address\"}\n\nOTHER_COMMANDS = {\n \"result.raw\": \"Show RAW Result\",\n \"result.decode\": \"Decode register response to known formats\",\n}\nEXCLUDE = [\"execute\", \"recv\", \"send\", \"trace\", \"set_debug\"]\nCLIENT_METHODS = [\n \"connect\",\n \"close\",\n \"idle_time\",\n \"is_socket_open\",\n \"get_port\",\n \"set_port\",\n \"get_stopbits\",\n \"set_stopbits\",\n \"get_bytesize\",\n \"set_bytesize\",\n \"get_parity\",\n \"set_parity\",\n \"get_baudrate\",\n \"set_baudrate\",\n \"get_timeout\",\n \"set_timeout\",\n \"get_serial_settings\",\n]\nCLIENT_ATTRIBUTES: List[str] = []\n\n\nclass Command:\n \"\"\"Class representing Commands to be consumed by Completer.\"\"\"\n\n def __init__(self, name, signature, doc, slave=False):\n \"\"\"Initialize.\n\n :param name: Name of the command\n :param signature: inspect object\n :param doc: Doc string for the command\n :param slave: Use slave as additional argument in the command .\n \"\"\"\n self.name = name\n self.doc = doc.split(\"\\n\") if doc else \" \".join(name.split(\"_\"))\n self.help_text = self._create_help()\n self.param_help = self._create_arg_help()\n if signature:\n self._params = signature.parameters\n self.args = self.create_completion()\n else:\n self._params = \"\"\n\n if self.name.startswith(\"client.\") and slave:\n self.args.update(**DEFAULT_KWARGS)\n\n def _create_help(self):\n \"\"\"Create help.\"\"\"\n doc = filter(lambda d: d, self.doc)\n cmd_help = list(\n filter(\n lambda x: not x.startswith(\":param\") and not x.startswith(\":return\"),\n doc,\n )\n )\n return \" \".join(cmd_help).strip()\n\n def _create_arg_help(self):\n \"\"\"Create arg help.\"\"\"\n param_dict = {}\n params = list(filter(lambda d: d.strip().startswith(\":param\"), self.doc))\n for param in params:\n param, param_help = param.split(\":param\")[1].strip().split(\":\")\n param_dict[param] = param_help\n return param_dict\n\n def create_completion(self):\n \"\"\"Create command completion meta data.\n\n :return:\n \"\"\"\n words = {}\n\n def _create(entry, default):\n if entry not in [\"self\", \"kwargs\"]:\n if isinstance(default, (int, str)):\n entry += f\"={default}\"\n return entry\n return None\n\n for arg in self._params.values():\n if entry := _create(arg.name, arg.default):\n entry, meta = self.get_meta(entry)\n words[entry] = meta\n\n return words\n\n def get_completion(self):\n \"\"\"Get a list of completions.\n\n :return:\n \"\"\"\n return self.args.keys()\n\n def get_meta(self, cmd):\n \"\"\"Get Meta info of a given command.\n\n :param cmd: Name of command.\n :return: Dict containing meta info.\n \"\"\"\n cmd = cmd.strip()\n cmd = cmd.split(\"=\")[0].strip()\n return cmd, self.param_help.get(cmd, \"\")\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n if self.doc:\n return \"Command {:>50}{:<20}\".format( # pylint: disable=consider-using-f-string\n self.name, self.doc\n )\n return f\"Command {self.name}\"\n\n\ndef _get_requests(members):\n \"\"\"Get requests.\"\"\"\n commands = list(\n filter(\n lambda x: (\n x[0] not in EXCLUDE and x[0] not in CLIENT_METHODS and callable(x[1])\n ),\n members,\n 
)\n )\n commands = {\n f\"client.{c[0]}\": Command(\n f\"client.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]), slave=False\n )\n for c in commands\n if not c[0].startswith(\"_\")\n }\n return commands\n\n\ndef _get_client_methods(members):\n \"\"\"Get client methods.\"\"\"\n commands = list(\n filter(lambda x: (x[0] not in EXCLUDE and x[0] in CLIENT_METHODS), members)\n )\n commands = {\n f\"client.{c[0]}\": Command(\n f\"client.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]), slave=False\n )\n for c in commands\n if not c[0].startswith(\"_\")\n }\n return commands\n\n\ndef _get_client_properties(members):\n \"\"\"Get client properties.\"\"\"\n global CLIENT_ATTRIBUTES # pylint: disable=global-variable-not-assigned\n commands = list(filter(lambda x: not callable(x[1]), members))\n commands = {\n f\"client.{c[0]}\": Command(f\"client.{c[0]}\", None, \"Read Only!\", slave=False)\n for c in commands\n if (not c[0].startswith(\"_\") and isinstance(c[1], (str, int, float)))\n }\n CLIENT_ATTRIBUTES.extend(list(commands.keys()))\n return commands\n\n\ndef get_commands(client):\n \"\"\"Retrieve all required methods and attributes.\n\n Of a client object and convert it to commands.\n\n :param client: Modbus Client object.\n :return:\n \"\"\"\n commands = {}\n members = inspect.getmembers(client)\n requests = _get_requests(members)\n client_methods = _get_client_methods(members)\n client_attr = _get_client_properties(members)\n\n result_commands = inspect.getmembers(Result, predicate=predicate)\n result_commands = {\n f\"result.{c[0]}\": Command(f\"result.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]))\n for c in result_commands\n if (not c[0].startswith(\"_\") and c[0] != \"print_result\")\n }\n commands.update(requests)\n commands.update(client_methods)\n commands.update(client_attr)\n commands.update(result_commands)\n return commands\n\n\nclass Result:\n \"\"\"Represent result command.\"\"\"\n\n function_code: int = None\n data: Union[Dict[int, Any], Any] = None\n\n def __init__(self, result):\n \"\"\"Initialize.\n\n :param result: Response of a modbus command.\n \"\"\"\n if isinstance(result, dict): # Modbus response\n self.function_code = result.pop(\"function_code\", None)\n self.data = dict(result)\n else:\n self.data = result\n\n def decode(self, formatters, byte_order=\"big\", word_order=\"big\"):\n \"\"\"Decode the register response to known formatters.\n\n :param formatters: int8/16/32/64, uint8/16/32/64, float32/64\n :param byte_order: little/big\n :param word_order: little/big\n \"\"\"\n # Read Holding Registers (3)\n # Read Input Registers (4)\n # Read Write Registers (23)\n if not isinstance(formatters, (list, tuple)):\n formatters = [formatters]\n\n if self.function_code not in [3, 4, 23]:\n print_formatted_text(HTML(\"<red>Decoder works only for registers!!</red>\"))\n return\n byte_order = (\n Endian.Little if byte_order.strip().lower() == \"little\" else Endian.Big\n )\n word_order = (\n Endian.Little if word_order.strip().lower() == \"little\" else Endian.Big\n )\n decoder = BinaryPayloadDecoder.fromRegisters(\n self.data.get(\"registers\"), byteorder=byte_order, wordorder=word_order\n )\n for formatter in formatters:\n if not (formatter := FORMATTERS.get(formatter)):\n print_formatted_text(\n HTML(f\"<red>Invalid Formatter - {formatter}!!</red>\")\n )\n return\n decoded = getattr(decoder, formatter)()\n self.print_result(decoded)\n\n def raw(self):\n \"\"\"Return raw result dict.\"\"\"\n self.print_result()\n\n def _process_dict(self, use_dict):\n \"\"\"Process dict.\"\"\"\n 
new_dict = OrderedDict()\n for k, v_item in use_dict.items():\n if isinstance(v_item, bytes):\n v_item = v_item.decode(\"utf-8\")\n elif isinstance(v_item, dict):\n v_item = self._process_dict(v_item)\n elif isinstance(v_item, (list, tuple)):\n v_item = [\n v1.decode(\"utf-8\") if isinstance(v1, bytes) else v1 for v1 in v_item\n ]\n new_dict[k] = v_item\n return new_dict\n\n def print_result(self, data=None):\n \"\"\"Print result object pretty.\n\n :param data: Data to be printed.\n \"\"\"\n data = data or self.data\n if isinstance(data, dict):\n data = self._process_dict(data)\n elif isinstance(data, (list, tuple)):\n data = [v.decode(\"utf-8\") if isinstance(v, bytes) else v for v in data]\n elif isinstance(data, bytes):\n data = data.decode(\"utf-8\")\n tokens = list(pygments.lex(json.dumps(data, indent=4), lexer=JsonLexer()))\n print_formatted_text(PygmentsTokens(tokens))\n", "path": "pymodbus/repl/client/helper.py"}]} | 3,686 | 190 |
gh_patches_debug_27447 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1310 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AZURE_103 not accepting vsts_configuration as valid Git repository
**Describe the bug**
The rule CKV_AZURE_103 only accepts the GitHub configuration as valid Git source control for an Azure Data Factory instance. However, it is also possible to configure Git source control using the `vsts_configuration` block.
**To Reproduce**
Steps to reproduce the behavior:
Create the following resource
```hcl
resource "azurerm_data_factory" "main" {
# General
name = "dummy-name"
resource_group_name = azurerm_resource_group.primary.name
location = azurerm_resource_group.primary.location
# Azure DevOps
vsts_configuration {
account_name = var.account_name
branch_name = var.branch_name
project_name = var.project_name
repository_name = var.repository_name
root_folder = var.root_folder
tenant_id = data.azurerm_client_config.current.tenant_id
}
}
```
**Expected behavior**
Expected to accept both vsts_configuration and github_configuration as valid Git source control configurations.
**Desktop (please complete the following information):**
- OS: Ubuntu 20.04 LTS
- Checkov Version 2.0.86
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py`
Content:
```
1 from checkov.common.models.consts import ANY_VALUE
2 from checkov.common.models.enums import CheckCategories
3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
4
5
6 class DataFactoryUsesGitRepository(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure that Azure Data Factory uses Git repository for source control"
9 id = "CKV_AZURE_103"
10 supported_resources = ['azurerm_data_factory']
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return "github_configuration/[0]/repository_name"
16
17 def get_expected_value(self):
18 return ANY_VALUE
19
20
21 check = DataFactoryUsesGitRepository()
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py
--- a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py
+++ b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py
@@ -1,21 +1,25 @@
-from checkov.common.models.consts import ANY_VALUE
-from checkov.common.models.enums import CheckCategories
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+from typing import Dict, List, Any
+from checkov.common.models.enums import CheckCategories, CheckResult
+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
-class DataFactoryUsesGitRepository(BaseResourceValueCheck):
- def __init__(self):
+
+class DataFactoryUsesGitRepository(BaseResourceCheck):
+ def __init__(self) -> None:
name = "Ensure that Azure Data Factory uses Git repository for source control"
id = "CKV_AZURE_103"
- supported_resources = ['azurerm_data_factory']
+ supported_resources = ["azurerm_data_factory"]
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def get_inspected_key(self):
- return "github_configuration/[0]/repository_name"
-
- def get_expected_value(self):
- return ANY_VALUE
+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
+ github = conf.get("github_configuration", [{}])[0]
+ if github.get("repository_name"):
+ return CheckResult.PASSED
+ vsts = conf.get("vsts_configuration", [{}])[0]
+ if vsts.get("repository_name"):
+ return CheckResult.PASSED
+ return CheckResult.FAILED
check = DataFactoryUsesGitRepository()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py\n--- a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py\n+++ b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py\n@@ -1,21 +1,25 @@\n-from checkov.common.models.consts import ANY_VALUE\n-from checkov.common.models.enums import CheckCategories\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n+from typing import Dict, List, Any\n \n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n-class DataFactoryUsesGitRepository(BaseResourceValueCheck):\n- def __init__(self):\n+\n+class DataFactoryUsesGitRepository(BaseResourceCheck):\n+ def __init__(self) -> None:\n name = \"Ensure that Azure Data Factory uses Git repository for source control\"\n id = \"CKV_AZURE_103\"\n- supported_resources = ['azurerm_data_factory']\n+ supported_resources = [\"azurerm_data_factory\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def get_inspected_key(self):\n- return \"github_configuration/[0]/repository_name\"\n-\n- def get_expected_value(self):\n- return ANY_VALUE\n+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n+ github = conf.get(\"github_configuration\", [{}])[0]\n+ if github.get(\"repository_name\"):\n+ return CheckResult.PASSED\n+ vsts = conf.get(\"vsts_configuration\", [{}])[0]\n+ if vsts.get(\"repository_name\"):\n+ return CheckResult.PASSED\n+ return CheckResult.FAILED\n \n \n check = DataFactoryUsesGitRepository()\n", "issue": "CKV_AZURE_103 not accepting vsts_configuration as valid Git repository\n**Describe the bug**\r\nThe rule CKV_AZURE_103 only accepts the Github configuration for Git source control for an Azure Data Factory instance. However, it is possible to configure a Git source control using the `vsts_configuration`. 
\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nCreate the following resource\r\n```hcl\r\nresource \"azurerm_data_factory\" \"main\" {\r\n\r\n # General\r\n name = \"dummy-name\"\r\n resource_group_name = azurerm_resource_group.primary.name\r\n location = azurerm_resource_group.primary.location\r\n\r\n # Azure DevOps\r\n vsts_configuration {\r\n account_name = var.account_name\r\n branch_name = var.branch_name\r\n project_name = var.project_name\r\n repository_name = var.repository_name\r\n root_folder = var.root_folder\r\n tenant_id = data.azurerm_client_config.current.tenant_id\r\n }\r\n\r\n}\r\n```\r\n\r\n**Expected behavior**\r\nExpected to accept both vsts_configuration and github_configuration as valid Git source control configurations.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu 20.04 LTS\r\n - Checkov Version 2.0.86\n", "before_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass DataFactoryUsesGitRepository(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Azure Data Factory uses Git repository for source control\"\n id = \"CKV_AZURE_103\"\n supported_resources = ['azurerm_data_factory']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"github_configuration/[0]/repository_name\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = DataFactoryUsesGitRepository()\n", "path": "checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py"}], "after_files": [{"content": "from typing import Dict, List, Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass DataFactoryUsesGitRepository(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure that Azure Data Factory uses Git repository for source control\"\n id = \"CKV_AZURE_103\"\n supported_resources = [\"azurerm_data_factory\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n github = conf.get(\"github_configuration\", [{}])[0]\n if github.get(\"repository_name\"):\n return CheckResult.PASSED\n vsts = conf.get(\"vsts_configuration\", [{}])[0]\n if vsts.get(\"repository_name\"):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = DataFactoryUsesGitRepository()\n", "path": "checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py"}]} | 750 | 436 |
gh_patches_debug_37919 | rasdani/github-patches | git_diff | google__timesketch-539 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tsctl to support TSV files
`tsctl` currently supports reading CSV files; however, it is often easier, and data is less likely to get broken, when _tab_-delimited files are used.
This is trivial to implement, e.g.:
1. add the option `tsv2ts` to [`shell_manager`](https://github.com/hiddenillusion/timesketch/blob/master/tsctl#L424)
2. in `CreateTimelineFromCsv()`, have an option for the _delimiter_ and, if `tsv2ts` was provided, change the CSV [`reader`](https://github.com/hiddenillusion/timesketch/blob/master/tsctl#L412) delimiter to a tab.
--- END ISSUE ---
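For illustration only, the core of the change sketched in the two steps above is just the `delimiter` argument of `csv.DictReader`; the following minimal Python sketch (the function name and the `tsv2ts` wiring are hypothetical, not the project's actual code) shows the idea:

```python
import csv


def read_delimited(path, delimiter=","):
    """Yield rows from a delimited file; a TSV is read by passing a tab as the delimiter."""
    with open(path, "r", newline="") as fh:
        reader = csv.DictReader(fh, delimiter=delimiter)
        for row in reader:
            yield row


# A hypothetical `tsv2ts` code path would only differ in the delimiter it passes:
# for row in read_delimited("events.tsv", delimiter="\t"):
#     print(row)
```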
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `timesketch/lib/tasks.py`
Content:
```
1 # Copyright 2015 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Celery task for processing Plaso storage files."""
15
16 import logging
17 import subprocess
18 import traceback
19
20 from flask import current_app
21
22 from timesketch import create_app
23 from timesketch import create_celery_app
24 from timesketch.lib.datastores.elastic import ElasticsearchDataStore
25 from timesketch.lib.utils import read_and_validate_csv
26 from timesketch.lib.utils import read_and_validate_jsonl
27 from timesketch.models import db_session
28 from timesketch.models.sketch import SearchIndex
29 from timesketch.models.sketch import Timeline
30
31 celery = create_celery_app()
32 flask_app = create_app()
33
34
35 def _set_timeline_status(index_name, status, error_msg=None):
36 """Helper function to set status for searchindex and all related timelines.
37
38 Args:
39 index_name: Name of the datastore index.
40 status: Status to set.
41 error_msg: Error message.
42 """
43 # Run within Flask context so we can make database changes
44 with flask_app.app_context():
45 searchindex = SearchIndex.query.filter_by(index_name=index_name).first()
46 timelines = Timeline.query.filter_by(searchindex=searchindex).all()
47
48 # Set status
49 searchindex.set_status(status)
50 for timeline in timelines:
51 timeline.set_status(status)
52 db_session.add(timeline)
53
54 # Update description if there was a failure in ingestion
55 if error_msg and status == u'fail':
56 # TODO: Don't overload the description field.
57 searchindex.description = error_msg
58
59 # Commit changes to database
60 db_session.add(searchindex)
61 db_session.commit()
62
63
64 @celery.task(track_started=True)
65 def run_plaso(source_file_path, timeline_name, index_name, source_type,
66 username=None):
67 """Create a Celery task for processing Plaso storage file.
68
69 Args:
70 source_file_path: Path to plaso storage file.
71 timeline_name: Name of the Timesketch timeline.
72 index_name: Name of the datastore index.
73 source_type: Type of file, csv or jsonl.
74 username: Username of the user who will own the timeline.
75
76 Returns:
77 String with summary of processed events.
78 """
79 # Log information to Celery
80 logging.info(u'Source type: %s', source_type)
81
82 cmd = [
83 u'psort.py', u'-o', u'timesketch', source_file_path, u'--name',
84 timeline_name, u'--status_view', u'none', u'--index', index_name
85 ]
86
87 if username:
88 cmd.append(u'--username')
89 cmd.append(username)
90
91 # Run psort.py
92 try:
93 cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
94 except subprocess.CalledProcessError as e:
95 # Mark the searchindex and timelines as failed and exit the task
96 _set_timeline_status(index_name, status=u'fail', error_msg=e.output)
97 return e.output
98
99 # Mark the searchindex and timelines as ready
100 _set_timeline_status(index_name, status=u'ready')
101
102 return cmd_output
103
104
105 @celery.task(track_started=True)
106 def run_csv_jsonl(source_file_path, timeline_name, index_name, source_type,
107 username=None):
108 """Create a Celery task for processing a CSV or JSONL file.
109
110 Args:
111 source_file_path: Path to CSV or JSONL file.
112 timeline_name: Name of the Timesketch timeline.
113 index_name: Name of the datastore index.
114 source_type: Type of file, csv or jsonl.
115 username: Username of the user who will own the timeline.
116
117 Returns:
118 Dictionary with count of processed events.
119 """
120 event_type = u'generic_event' # Document type for Elasticsearch
121 validators = {
122 u'csv': read_and_validate_csv,
123 u'jsonl': read_and_validate_jsonl
124 }
125 read_and_validate = validators.get(source_type)
126
127 # Log information to Celery
128 logging.info(u'Index name: %s', index_name)
129 logging.info(u'Timeline name: %s', timeline_name)
130 logging.info(u'Source type: %s', source_type)
131 logging.info(u'Document type: %s', event_type)
132 logging.info(u'Owner: %s', username)
133
134 es = ElasticsearchDataStore(
135 host=current_app.config[u'ELASTIC_HOST'],
136 port=current_app.config[u'ELASTIC_PORT'])
137
138 # Reason for the broad exception catch is that we want to capture
139 # all possible errors and exit the task.
140 try:
141 es.create_index(index_name=index_name, doc_type=event_type)
142 for event in read_and_validate(source_file_path):
143 es.import_event(index_name, event_type, event)
144 # Import the remaining events
145 total_events = es.import_event(index_name, event_type)
146 except Exception as e:
147 # Mark the searchindex and timelines as failed and exit the task
148 error_msg = traceback.format_exc(e)
149 _set_timeline_status(index_name, status=u'fail', error_msg=error_msg)
150 logging.error(error_msg)
151 return
152
153 # Set status to ready when done
154 _set_timeline_status(index_name, status=u'ready')
155
156 return {u'Events processed': total_events}
157
```
Path: `timesketch/lib/utils.py`
Content:
```
1 # Copyright 2015 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Common functions and utilities."""
15
16 import colorsys
17 import csv
18 import datetime
19 import json
20 import random
21 import time
22
23 from dateutil import parser
24
25
26 def random_color():
27 """Generates a random color.
28
29 Returns:
30 Color as string in HEX
31 """
32 hue = random.random()
33 golden_ratio_conjugate = (1 + 5**0.5) / 2
34 hue += golden_ratio_conjugate
35 hue %= 1
36 rgb = tuple(int(i * 256) for i in colorsys.hsv_to_rgb(hue, 0.5, 0.95))
37 return u'{0:02X}{1:02X}{2:02X}'.format(rgb[0], rgb[1], rgb[2])
38
39
40 def read_and_validate_csv(path):
41 """Generator for reading a CSV file.
42
43 Args:
44 path: Path to the CSV file
45 """
46 # Columns that must be present in the CSV file
47 mandatory_fields = [u'message', u'datetime', u'timestamp_desc']
48
49 with open(path, 'rb') as fh:
50
51 reader = csv.DictReader(fh)
52 csv_header = reader.fieldnames
53 missing_fields = []
54 # Validate the CSV header
55 for field in mandatory_fields:
56 if field not in csv_header:
57 missing_fields.append(field)
58 if missing_fields:
59 raise RuntimeError(
60 u'Missing fields in CSV header: {0:s}'.format(missing_fields))
61 for row in reader:
62 if u'timestamp' not in csv_header and u'datetime' in csv_header:
63 try:
64 parsed_datetime = parser.parse(row[u'datetime'])
65 row[u'timestamp'] = str(
66 int(time.mktime(parsed_datetime.timetuple())))
67 except ValueError:
68 continue
69
70 yield row
71
72
73 def read_and_validate_jsonl(path):
74 """Generator for reading a JSONL (json lines) file.
75
76 Args:
77 path: Path to the JSONL file
78 """
79 # Fields that must be present in each entry of the JSONL file.
80 mandatory_fields = [u'message', u'datetime', u'timestamp_desc']
81 with open(path, 'rb') as fh:
82
83 lineno = 0
84 for line in fh:
85 lineno += 1
86 try:
87 linedict = json.loads(line)
88 ld_keys = linedict.keys()
89 if u'datetime' not in ld_keys and u'timestamp' in ld_keys:
90 epoch = int(str(linedict[u'timestamp'])[:10])
91 dt = datetime.datetime.fromtimestamp(epoch)
92 linedict[u'datetime'] = dt.isoformat()
93 if u'timestamp' not in ld_keys and u'datetime' in ld_keys:
94 linedict[u'timestamp'] = parser.parse(linedict[u'datetime'])
95
96 missing_fields = []
97 for field in mandatory_fields:
98 if field not in linedict.keys():
99 missing_fields.append(field)
100 if missing_fields:
101 raise RuntimeError(
102 u"Missing field(s) at line {0:n}: {1:s}"
103 .format(lineno, missing_fields))
104
105 yield linedict
106
107 except ValueError as e:
108 raise RuntimeError(
109 u"Error parsing JSON at line {0:n}: {1:s}"
110 .format(lineno, e))
111
112
113 def get_validated_indices(indices, sketch_indices):
114 """Exclude any deleted search index references.
115
116 Args:
117 indices: List of indices from the user
118 sketch_indices: List of indices in the sketch
119
120 Returns:
121 Set of indices with those removed that is not in the sketch
122 """
123 exclude = set(indices) - set(sketch_indices)
124 if exclude:
125 indices = [index for index in indices if index not in exclude]
126 return indices
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/timesketch/lib/tasks.py b/timesketch/lib/tasks.py
--- a/timesketch/lib/tasks.py
+++ b/timesketch/lib/tasks.py
@@ -104,7 +104,7 @@
@celery.task(track_started=True)
def run_csv_jsonl(source_file_path, timeline_name, index_name, source_type,
- username=None):
+ delimiter=None, username=None):
"""Create a Celery task for processing a CSV or JSONL file.
Args:
@@ -112,6 +112,7 @@
timeline_name: Name of the Timesketch timeline.
index_name: Name of the datastore index.
source_type: Type of file, csv or jsonl.
+ delimiter: Character used as a field separator
username: Username of the user who will own the timeline.
Returns:
@@ -139,7 +140,7 @@
# all possible errors and exit the task.
try:
es.create_index(index_name=index_name, doc_type=event_type)
- for event in read_and_validate(source_file_path):
+ for event in read_and_validate(source_file_path, delimiter):
es.import_event(index_name, event_type, event)
# Import the remaining events
total_events = es.import_event(index_name, event_type)
diff --git a/timesketch/lib/utils.py b/timesketch/lib/utils.py
--- a/timesketch/lib/utils.py
+++ b/timesketch/lib/utils.py
@@ -37,18 +37,19 @@
return u'{0:02X}{1:02X}{2:02X}'.format(rgb[0], rgb[1], rgb[2])
-def read_and_validate_csv(path):
- """Generator for reading a CSV file.
+def read_and_validate_csv(path, delimiter):
+ """Generator for reading a CSV or TSV file.
Args:
- path: Path to the CSV file
+ path: Path to the file
+ delimiter: character used as a field separator
"""
# Columns that must be present in the CSV file
mandatory_fields = [u'message', u'datetime', u'timestamp_desc']
with open(path, 'rb') as fh:
- reader = csv.DictReader(fh)
+ reader = csv.DictReader(fh, delimiter=delimiter.decode('string_escape'))
csv_header = reader.fieldnames
missing_fields = []
# Validate the CSV header
@@ -70,7 +71,7 @@
yield row
-def read_and_validate_jsonl(path):
+def read_and_validate_jsonl(path, _):
"""Generator for reading a JSONL (json lines) file.
Args:
| {"golden_diff": "diff --git a/timesketch/lib/tasks.py b/timesketch/lib/tasks.py\n--- a/timesketch/lib/tasks.py\n+++ b/timesketch/lib/tasks.py\n@@ -104,7 +104,7 @@\n \n @celery.task(track_started=True)\n def run_csv_jsonl(source_file_path, timeline_name, index_name, source_type,\n- username=None):\n+ delimiter=None, username=None):\n \"\"\"Create a Celery task for processing a CSV or JSONL file.\n \n Args:\n@@ -112,6 +112,7 @@\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n source_type: Type of file, csv or jsonl.\n+ delimiter: Character used as a field separator\n username: Username of the user who will own the timeline.\n \n Returns:\n@@ -139,7 +140,7 @@\n # all possible errors and exit the task.\n try:\n es.create_index(index_name=index_name, doc_type=event_type)\n- for event in read_and_validate(source_file_path):\n+ for event in read_and_validate(source_file_path, delimiter):\n es.import_event(index_name, event_type, event)\n # Import the remaining events\n total_events = es.import_event(index_name, event_type)\ndiff --git a/timesketch/lib/utils.py b/timesketch/lib/utils.py\n--- a/timesketch/lib/utils.py\n+++ b/timesketch/lib/utils.py\n@@ -37,18 +37,19 @@\n return u'{0:02X}{1:02X}{2:02X}'.format(rgb[0], rgb[1], rgb[2])\n \n \n-def read_and_validate_csv(path):\n- \"\"\"Generator for reading a CSV file.\n+def read_and_validate_csv(path, delimiter):\n+ \"\"\"Generator for reading a CSV or TSV file.\n \n Args:\n- path: Path to the CSV file\n+ path: Path to the file\n+ delimiter: character used as a field separator\n \"\"\"\n # Columns that must be present in the CSV file\n mandatory_fields = [u'message', u'datetime', u'timestamp_desc']\n \n with open(path, 'rb') as fh:\n \n- reader = csv.DictReader(fh)\n+ reader = csv.DictReader(fh, delimiter=delimiter.decode('string_escape'))\n csv_header = reader.fieldnames\n missing_fields = []\n # Validate the CSV header\n@@ -70,7 +71,7 @@\n yield row\n \n \n-def read_and_validate_jsonl(path):\n+def read_and_validate_jsonl(path, _):\n \"\"\"Generator for reading a JSONL (json lines) file.\n \n Args:\n", "issue": "tsctl to support TSV files\n`tsctl` currently supports reading CSV files, however, often times its easier/less likely data gets broken by using _tab_ delimited files.\r\n\r\nThis is trivial to implement, e.g - :\r\n1. add the option `tsv2ts` to [`shell_manager`](https://github.com/hiddenillusion/timesketch/blob/master/tsctl#L424)\r\n2. in `CreateTimelineFromCsv()`have an option for _delimiter_ and if `tsv2ts` was provided, change the CSV [`reader`](https://github.com/hiddenillusion/timesketch/blob/master/tsctl#L412) delimiter to a tab.\n", "before_files": [{"content": "# Copyright 2015 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Celery task for processing Plaso storage files.\"\"\"\n\nimport logging\nimport subprocess\nimport traceback\n\nfrom flask import current_app\n\nfrom timesketch import create_app\nfrom timesketch import create_celery_app\nfrom timesketch.lib.datastores.elastic import ElasticsearchDataStore\nfrom timesketch.lib.utils import read_and_validate_csv\nfrom timesketch.lib.utils import read_and_validate_jsonl\nfrom timesketch.models import db_session\nfrom timesketch.models.sketch import SearchIndex\nfrom timesketch.models.sketch import Timeline\n\ncelery = create_celery_app()\nflask_app = create_app()\n\n\ndef _set_timeline_status(index_name, status, error_msg=None):\n \"\"\"Helper function to set status for searchindex and all related timelines.\n\n Args:\n index_name: Name of the datastore index.\n status: Status to set.\n error_msg: Error message.\n \"\"\"\n # Run within Flask context so we can make database changes\n with flask_app.app_context():\n searchindex = SearchIndex.query.filter_by(index_name=index_name).first()\n timelines = Timeline.query.filter_by(searchindex=searchindex).all()\n\n # Set status\n searchindex.set_status(status)\n for timeline in timelines:\n timeline.set_status(status)\n db_session.add(timeline)\n\n # Update description if there was a failure in ingestion\n if error_msg and status == u'fail':\n # TODO: Don't overload the description field.\n searchindex.description = error_msg\n\n # Commit changes to database\n db_session.add(searchindex)\n db_session.commit()\n\n\[email protected](track_started=True)\ndef run_plaso(source_file_path, timeline_name, index_name, source_type,\n username=None):\n \"\"\"Create a Celery task for processing Plaso storage file.\n\n Args:\n source_file_path: Path to plaso storage file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n source_type: Type of file, csv or jsonl.\n username: Username of the user who will own the timeline.\n\n Returns:\n String with summary of processed events.\n \"\"\"\n # Log information to Celery\n logging.info(u'Source type: %s', source_type)\n\n cmd = [\n u'psort.py', u'-o', u'timesketch', source_file_path, u'--name',\n timeline_name, u'--status_view', u'none', u'--index', index_name\n ]\n\n if username:\n cmd.append(u'--username')\n cmd.append(username)\n\n # Run psort.py\n try:\n cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n # Mark the searchindex and timelines as failed and exit the task\n _set_timeline_status(index_name, status=u'fail', error_msg=e.output)\n return e.output\n\n # Mark the searchindex and timelines as ready\n _set_timeline_status(index_name, status=u'ready')\n\n return cmd_output\n\n\[email protected](track_started=True)\ndef run_csv_jsonl(source_file_path, timeline_name, index_name, source_type,\n username=None):\n \"\"\"Create a Celery task for processing a CSV or JSONL file.\n\n Args:\n 
source_file_path: Path to CSV or JSONL file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n source_type: Type of file, csv or jsonl.\n username: Username of the user who will own the timeline.\n\n Returns:\n Dictionary with count of processed events.\n \"\"\"\n event_type = u'generic_event' # Document type for Elasticsearch\n validators = {\n u'csv': read_and_validate_csv,\n u'jsonl': read_and_validate_jsonl\n }\n read_and_validate = validators.get(source_type)\n\n # Log information to Celery\n logging.info(u'Index name: %s', index_name)\n logging.info(u'Timeline name: %s', timeline_name)\n logging.info(u'Source type: %s', source_type)\n logging.info(u'Document type: %s', event_type)\n logging.info(u'Owner: %s', username)\n\n es = ElasticsearchDataStore(\n host=current_app.config[u'ELASTIC_HOST'],\n port=current_app.config[u'ELASTIC_PORT'])\n\n # Reason for the broad exception catch is that we want to capture\n # all possible errors and exit the task.\n try:\n es.create_index(index_name=index_name, doc_type=event_type)\n for event in read_and_validate(source_file_path):\n es.import_event(index_name, event_type, event)\n # Import the remaining events\n total_events = es.import_event(index_name, event_type)\n except Exception as e:\n # Mark the searchindex and timelines as failed and exit the task\n error_msg = traceback.format_exc(e)\n _set_timeline_status(index_name, status=u'fail', error_msg=error_msg)\n logging.error(error_msg)\n return\n\n # Set status to ready when done\n _set_timeline_status(index_name, status=u'ready')\n\n return {u'Events processed': total_events}\n", "path": "timesketch/lib/tasks.py"}, {"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Common functions and utilities.\"\"\"\n\nimport colorsys\nimport csv\nimport datetime\nimport json\nimport random\nimport time\n\nfrom dateutil import parser\n\n\ndef random_color():\n \"\"\"Generates a random color.\n\n Returns:\n Color as string in HEX\n \"\"\"\n hue = random.random()\n golden_ratio_conjugate = (1 + 5**0.5) / 2\n hue += golden_ratio_conjugate\n hue %= 1\n rgb = tuple(int(i * 256) for i in colorsys.hsv_to_rgb(hue, 0.5, 0.95))\n return u'{0:02X}{1:02X}{2:02X}'.format(rgb[0], rgb[1], rgb[2])\n\n\ndef read_and_validate_csv(path):\n \"\"\"Generator for reading a CSV file.\n\n Args:\n path: Path to the CSV file\n \"\"\"\n # Columns that must be present in the CSV file\n mandatory_fields = [u'message', u'datetime', u'timestamp_desc']\n\n with open(path, 'rb') as fh:\n\n reader = csv.DictReader(fh)\n csv_header = reader.fieldnames\n missing_fields = []\n # Validate the CSV header\n for field in mandatory_fields:\n if field not in csv_header:\n missing_fields.append(field)\n if missing_fields:\n raise RuntimeError(\n u'Missing fields in CSV header: {0:s}'.format(missing_fields))\n for row in reader:\n if u'timestamp' not in csv_header and u'datetime' in csv_header:\n try:\n parsed_datetime = 
parser.parse(row[u'datetime'])\n row[u'timestamp'] = str(\n int(time.mktime(parsed_datetime.timetuple())))\n except ValueError:\n continue\n\n yield row\n\n\ndef read_and_validate_jsonl(path):\n \"\"\"Generator for reading a JSONL (json lines) file.\n\n Args:\n path: Path to the JSONL file\n \"\"\"\n # Fields that must be present in each entry of the JSONL file.\n mandatory_fields = [u'message', u'datetime', u'timestamp_desc']\n with open(path, 'rb') as fh:\n\n lineno = 0\n for line in fh:\n lineno += 1\n try:\n linedict = json.loads(line)\n ld_keys = linedict.keys()\n if u'datetime' not in ld_keys and u'timestamp' in ld_keys:\n epoch = int(str(linedict[u'timestamp'])[:10])\n dt = datetime.datetime.fromtimestamp(epoch)\n linedict[u'datetime'] = dt.isoformat()\n if u'timestamp' not in ld_keys and u'datetime' in ld_keys:\n linedict[u'timestamp'] = parser.parse(linedict[u'datetime'])\n\n missing_fields = []\n for field in mandatory_fields:\n if field not in linedict.keys():\n missing_fields.append(field)\n if missing_fields:\n raise RuntimeError(\n u\"Missing field(s) at line {0:n}: {1:s}\"\n .format(lineno, missing_fields))\n\n yield linedict\n\n except ValueError as e:\n raise RuntimeError(\n u\"Error parsing JSON at line {0:n}: {1:s}\"\n .format(lineno, e))\n\n\ndef get_validated_indices(indices, sketch_indices):\n \"\"\"Exclude any deleted search index references.\n\n Args:\n indices: List of indices from the user\n sketch_indices: List of indices in the sketch\n\n Returns:\n Set of indices with those removed that is not in the sketch\n \"\"\"\n exclude = set(indices) - set(sketch_indices)\n if exclude:\n indices = [index for index in indices if index not in exclude]\n return indices\n", "path": "timesketch/lib/utils.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Celery task for processing Plaso storage files.\"\"\"\n\nimport logging\nimport subprocess\nimport traceback\n\nfrom flask import current_app\n\nfrom timesketch import create_app\nfrom timesketch import create_celery_app\nfrom timesketch.lib.datastores.elastic import ElasticsearchDataStore\nfrom timesketch.lib.utils import read_and_validate_csv\nfrom timesketch.lib.utils import read_and_validate_jsonl\nfrom timesketch.models import db_session\nfrom timesketch.models.sketch import SearchIndex\nfrom timesketch.models.sketch import Timeline\n\ncelery = create_celery_app()\nflask_app = create_app()\n\n\ndef _set_timeline_status(index_name, status, error_msg=None):\n \"\"\"Helper function to set status for searchindex and all related timelines.\n\n Args:\n index_name: Name of the datastore index.\n status: Status to set.\n error_msg: Error message.\n \"\"\"\n # Run within Flask context so we can make database changes\n with flask_app.app_context():\n searchindex = SearchIndex.query.filter_by(index_name=index_name).first()\n timelines = Timeline.query.filter_by(searchindex=searchindex).all()\n\n # Set status\n searchindex.set_status(status)\n for timeline in timelines:\n timeline.set_status(status)\n db_session.add(timeline)\n\n # Update description if there was a failure in ingestion\n if error_msg and status == u'fail':\n # TODO: Don't overload the description field.\n searchindex.description = error_msg\n\n # Commit changes to database\n db_session.add(searchindex)\n db_session.commit()\n\n\[email protected](track_started=True)\ndef run_plaso(source_file_path, timeline_name, index_name, source_type,\n username=None):\n \"\"\"Create a Celery task for processing Plaso storage file.\n\n Args:\n source_file_path: Path to plaso storage file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n source_type: Type of file, csv or jsonl.\n username: Username of the user who will own the timeline.\n\n Returns:\n String with summary of processed events.\n \"\"\"\n # Log information to Celery\n logging.info(u'Source type: %s', source_type)\n\n cmd = [\n u'psort.py', u'-o', u'timesketch', source_file_path, u'--name',\n timeline_name, u'--status_view', u'none', u'--index', index_name\n ]\n\n if username:\n cmd.append(u'--username')\n cmd.append(username)\n\n # Run psort.py\n try:\n cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n # Mark the searchindex and timelines as failed and exit the task\n _set_timeline_status(index_name, status=u'fail', error_msg=e.output)\n return e.output\n\n # Mark the searchindex and timelines as ready\n _set_timeline_status(index_name, status=u'ready')\n\n return cmd_output\n\n\[email protected](track_started=True)\ndef run_csv_jsonl(source_file_path, timeline_name, index_name, source_type,\n delimiter=None, username=None):\n \"\"\"Create a Celery task for processing a CSV or JSONL file.\n\n 
Args:\n source_file_path: Path to CSV or JSONL file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n source_type: Type of file, csv or jsonl.\n delimiter: Character used as a field separator\n username: Username of the user who will own the timeline.\n\n Returns:\n Dictionary with count of processed events.\n \"\"\"\n event_type = u'generic_event' # Document type for Elasticsearch\n validators = {\n u'csv': read_and_validate_csv,\n u'jsonl': read_and_validate_jsonl\n }\n read_and_validate = validators.get(source_type)\n\n # Log information to Celery\n logging.info(u'Index name: %s', index_name)\n logging.info(u'Timeline name: %s', timeline_name)\n logging.info(u'Source type: %s', source_type)\n logging.info(u'Document type: %s', event_type)\n logging.info(u'Owner: %s', username)\n\n es = ElasticsearchDataStore(\n host=current_app.config[u'ELASTIC_HOST'],\n port=current_app.config[u'ELASTIC_PORT'])\n\n # Reason for the broad exception catch is that we want to capture\n # all possible errors and exit the task.\n try:\n es.create_index(index_name=index_name, doc_type=event_type)\n for event in read_and_validate(source_file_path, delimiter):\n es.import_event(index_name, event_type, event)\n # Import the remaining events\n total_events = es.import_event(index_name, event_type)\n except Exception as e:\n # Mark the searchindex and timelines as failed and exit the task\n error_msg = traceback.format_exc(e)\n _set_timeline_status(index_name, status=u'fail', error_msg=error_msg)\n logging.error(error_msg)\n return\n\n # Set status to ready when done\n _set_timeline_status(index_name, status=u'ready')\n\n return {u'Events processed': total_events}\n", "path": "timesketch/lib/tasks.py"}, {"content": "# Copyright 2015 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Common functions and utilities.\"\"\"\n\nimport colorsys\nimport csv\nimport datetime\nimport json\nimport random\nimport time\n\nfrom dateutil import parser\n\n\ndef random_color():\n \"\"\"Generates a random color.\n\n Returns:\n Color as string in HEX\n \"\"\"\n hue = random.random()\n golden_ratio_conjugate = (1 + 5**0.5) / 2\n hue += golden_ratio_conjugate\n hue %= 1\n rgb = tuple(int(i * 256) for i in colorsys.hsv_to_rgb(hue, 0.5, 0.95))\n return u'{0:02X}{1:02X}{2:02X}'.format(rgb[0], rgb[1], rgb[2])\n\n\ndef read_and_validate_csv(path, delimiter):\n \"\"\"Generator for reading a CSV or TSV file.\n\n Args:\n path: Path to the file\n delimiter: character used as a field separator\n \"\"\"\n # Columns that must be present in the CSV file\n mandatory_fields = [u'message', u'datetime', u'timestamp_desc']\n\n with open(path, 'rb') as fh:\n\n reader = csv.DictReader(fh, delimiter=delimiter.decode('string_escape'))\n csv_header = reader.fieldnames\n missing_fields = []\n # Validate the CSV header\n for field in mandatory_fields:\n if field not in csv_header:\n missing_fields.append(field)\n if missing_fields:\n raise RuntimeError(\n u'Missing fields in CSV header: {0:s}'.format(missing_fields))\n for row in reader:\n if u'timestamp' not in csv_header and u'datetime' in csv_header:\n try:\n parsed_datetime = parser.parse(row[u'datetime'])\n row[u'timestamp'] = str(\n int(time.mktime(parsed_datetime.timetuple())))\n except ValueError:\n continue\n\n yield row\n\n\ndef read_and_validate_jsonl(path, _):\n \"\"\"Generator for reading a JSONL (json lines) file.\n\n Args:\n path: Path to the JSONL file\n \"\"\"\n # Fields that must be present in each entry of the JSONL file.\n mandatory_fields = [u'message', u'datetime', u'timestamp_desc']\n with open(path, 'rb') as fh:\n\n lineno = 0\n for line in fh:\n lineno += 1\n try:\n linedict = json.loads(line)\n ld_keys = linedict.keys()\n if u'datetime' not in ld_keys and u'timestamp' in ld_keys:\n epoch = int(str(linedict[u'timestamp'])[:10])\n dt = datetime.datetime.fromtimestamp(epoch)\n linedict[u'datetime'] = dt.isoformat()\n if u'timestamp' not in ld_keys and u'datetime' in ld_keys:\n linedict[u'timestamp'] = parser.parse(linedict[u'datetime'])\n\n missing_fields = []\n for field in mandatory_fields:\n if field not in linedict.keys():\n missing_fields.append(field)\n if missing_fields:\n raise RuntimeError(\n u\"Missing field(s) at line {0:n}: {1:s}\"\n .format(lineno, missing_fields))\n\n yield linedict\n\n except ValueError as e:\n raise RuntimeError(\n u\"Error parsing JSON at line {0:n}: {1:s}\"\n .format(lineno, e))\n\n\ndef get_validated_indices(indices, sketch_indices):\n \"\"\"Exclude any deleted search index references.\n\n Args:\n indices: List of indices from the user\n sketch_indices: List of indices in the sketch\n\n Returns:\n Set of indices with those removed that is not in the sketch\n \"\"\"\n exclude = set(indices) - set(sketch_indices)\n if exclude:\n 
indices = [index for index in indices if index not in exclude]\n return indices\n", "path": "timesketch/lib/utils.py"}]} | 3,273 | 599 |
gh_patches_debug_34807 | rasdani/github-patches | git_diff | conan-io__conan-4611 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecation message for `cppinfo.cppflags`
Introduce a new alias of `cppflags` named `cxxflags` and change the docs. If possible, show a deprecation message when `cppflags` is used.
The choice of `cppflags` to mean `c++flags` was not a good one, because `cppflags` conventionally refers to preprocessor flags, which is confusing.
--- END ISSUE ---
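For illustration, one common way to provide such an alias together with a deprecation notice is a property pair that forwards the old name to the new attribute. The sketch below is an assumption-laden example (class name, attribute storage, and warning text are illustrative, not Conan's actual implementation):

```python
import warnings


class CppInfoFlags(object):
    """Illustrative flag holder exposing `cxxflags` with a deprecated `cppflags` alias."""

    def __init__(self):
        self.cxxflags = []  # preferred name for C++ compilation flags

    @property
    def cppflags(self):
        # Deprecated alias kept only for backwards compatibility
        warnings.warn("'cppflags' is deprecated, use 'cxxflags' instead",
                      DeprecationWarning)
        return self.cxxflags

    @cppflags.setter
    def cppflags(self, value):
        warnings.warn("'cppflags' is deprecated, use 'cxxflags' instead",
                      DeprecationWarning)
        self.cxxflags = value
```

With such an alias, `info.cppflags.append("-fPIC")` and `info.cxxflags.append("-fPIC")` mutate the same list, while only the deprecated spelling emits the warning.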
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/model/build_info.py`
Content:
```
1 import os
2 from collections import OrderedDict
3
4
5 DEFAULT_INCLUDE = "include"
6 DEFAULT_LIB = "lib"
7 DEFAULT_BIN = "bin"
8 DEFAULT_RES = "res"
9 DEFAULT_SHARE = "share"
10
11
12 class _CppInfo(object):
13 """ Object that stores all the necessary information to build in C/C++.
14 It is intended to be system independent, translation to
15 specific systems will be produced from this info
16 """
17 def __init__(self):
18 self.includedirs = [] # Ordered list of include paths
19 self.srcdirs = [] # Ordered list of source paths
20 self.libdirs = [] # Directories to find libraries
21 self.resdirs = [] # Directories to find resources, data, etc
22 self.bindirs = [] # Directories to find executables and shared libs
23 self.builddirs = []
24 self.rootpaths = []
25 self.libs = [] # The libs to link against
26 self.defines = [] # preprocessor definitions
27 self.cflags = [] # pure C flags
28 self.cppflags = [] # C++ compilation flags
29 self.sharedlinkflags = [] # linker flags
30 self.exelinkflags = [] # linker flags
31 self.rootpath = ""
32 self.sysroot = ""
33 self._include_paths = None
34 self._lib_paths = None
35 self._bin_paths = None
36 self._build_paths = None
37 self._res_paths = None
38 self._src_paths = None
39 self.version = None # Version of the conan package
40 self.description = None # Description of the conan package
41 # When package is editable, filter_empty=False, so empty dirs are maintained
42 self.filter_empty = True
43
44 def _filter_paths(self, paths):
45 abs_paths = [os.path.join(self.rootpath, p)
46 if not os.path.isabs(p) else p for p in paths]
47 if self.filter_empty:
48 return [p for p in abs_paths if os.path.isdir(p)]
49 else:
50 return abs_paths
51
52 @property
53 def include_paths(self):
54 if self._include_paths is None:
55 self._include_paths = self._filter_paths(self.includedirs)
56 return self._include_paths
57
58 @property
59 def lib_paths(self):
60 if self._lib_paths is None:
61 self._lib_paths = self._filter_paths(self.libdirs)
62 return self._lib_paths
63
64 @property
65 def src_paths(self):
66 if self._src_paths is None:
67 self._src_paths = self._filter_paths(self.srcdirs)
68 return self._src_paths
69
70 @property
71 def bin_paths(self):
72 if self._bin_paths is None:
73 self._bin_paths = self._filter_paths(self.bindirs)
74 return self._bin_paths
75
76 @property
77 def build_paths(self):
78 if self._build_paths is None:
79 self._build_paths = self._filter_paths(self.builddirs)
80 return self._build_paths
81
82 @property
83 def res_paths(self):
84 if self._res_paths is None:
85 self._res_paths = self._filter_paths(self.resdirs)
86 return self._res_paths
87
88
89 class CppInfo(_CppInfo):
90 """ Build Information declared to be used by the CONSUMERS of a
91 conans. That means that consumers must use this flags and configs i order
92 to build properly.
93 Defined in user CONANFILE, directories are relative at user definition time
94 """
95 def __init__(self, root_folder):
96 super(CppInfo, self).__init__()
97 self.rootpath = root_folder # the full path of the package in which the conans is found
98 self.includedirs.append(DEFAULT_INCLUDE)
99 self.libdirs.append(DEFAULT_LIB)
100 self.bindirs.append(DEFAULT_BIN)
101 self.resdirs.append(DEFAULT_RES)
102 self.builddirs.append("")
103 # public_deps is needed to accumulate list of deps for cmake targets
104 self.public_deps = []
105 self.configs = {}
106
107 def __getattr__(self, config):
108
109 def _get_cpp_info():
110 result = _CppInfo()
111 result.rootpath = self.rootpath
112 result.sysroot = self.sysroot
113 result.includedirs.append(DEFAULT_INCLUDE)
114 result.libdirs.append(DEFAULT_LIB)
115 result.bindirs.append(DEFAULT_BIN)
116 result.resdirs.append(DEFAULT_RES)
117 result.builddirs.append("")
118 return result
119
120 return self.configs.setdefault(config, _get_cpp_info())
121
122
123 class _BaseDepsCppInfo(_CppInfo):
124 def __init__(self):
125 super(_BaseDepsCppInfo, self).__init__()
126
127 def update(self, dep_cpp_info):
128
129 def merge_lists(seq1, seq2):
130 return [s for s in seq1 if s not in seq2] + seq2
131
132 self.includedirs = merge_lists(self.includedirs, dep_cpp_info.include_paths)
133 self.srcdirs = merge_lists(self.srcdirs, dep_cpp_info.src_paths)
134 self.libdirs = merge_lists(self.libdirs, dep_cpp_info.lib_paths)
135 self.bindirs = merge_lists(self.bindirs, dep_cpp_info.bin_paths)
136 self.resdirs = merge_lists(self.resdirs, dep_cpp_info.res_paths)
137 self.builddirs = merge_lists(self.builddirs, dep_cpp_info.build_paths)
138 self.libs = merge_lists(self.libs, dep_cpp_info.libs)
139 self.rootpaths.append(dep_cpp_info.rootpath)
140
141 # Note these are in reverse order
142 self.defines = merge_lists(dep_cpp_info.defines, self.defines)
143 self.cppflags = merge_lists(dep_cpp_info.cppflags, self.cppflags)
144 self.cflags = merge_lists(dep_cpp_info.cflags, self.cflags)
145 self.sharedlinkflags = merge_lists(dep_cpp_info.sharedlinkflags, self.sharedlinkflags)
146 self.exelinkflags = merge_lists(dep_cpp_info.exelinkflags, self.exelinkflags)
147
148 if not self.sysroot:
149 self.sysroot = dep_cpp_info.sysroot
150
151 @property
152 def include_paths(self):
153 return self.includedirs
154
155 @property
156 def lib_paths(self):
157 return self.libdirs
158
159 @property
160 def src_paths(self):
161 return self.srcdirs
162
163 @property
164 def bin_paths(self):
165 return self.bindirs
166
167 @property
168 def build_paths(self):
169 return self.builddirs
170
171 @property
172 def res_paths(self):
173 return self.resdirs
174
175
176 class DepsCppInfo(_BaseDepsCppInfo):
177 """ Build Information necessary to build a given conans. It contains the
178 flags, directories and options if its dependencies. The conans CONANFILE
179 should use these flags to pass them to the underlaying build system (Cmake, make),
180 so deps info is managed
181 """
182
183 def __init__(self):
184 super(DepsCppInfo, self).__init__()
185 self._dependencies = OrderedDict()
186 self.configs = {}
187
188 def __getattr__(self, config):
189 return self.configs.setdefault(config, _BaseDepsCppInfo())
190
191 @property
192 def dependencies(self):
193 return self._dependencies.items()
194
195 @property
196 def deps(self):
197 return self._dependencies.keys()
198
199 def __getitem__(self, item):
200 return self._dependencies[item]
201
202 def update(self, dep_cpp_info, pkg_name):
203 assert isinstance(dep_cpp_info, CppInfo)
204 self._dependencies[pkg_name] = dep_cpp_info
205 super(DepsCppInfo, self).update(dep_cpp_info)
206 for config, cpp_info in dep_cpp_info.configs.items():
207 self.configs.setdefault(config, _BaseDepsCppInfo()).update(cpp_info)
208
209 def update_deps_cpp_info(self, dep_cpp_info):
210 assert isinstance(dep_cpp_info, DepsCppInfo)
211 for pkg_name, cpp_info in dep_cpp_info.dependencies:
212 self.update(cpp_info, pkg_name)
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/model/build_info.py b/conans/model/build_info.py
--- a/conans/model/build_info.py
+++ b/conans/model/build_info.py
@@ -1,6 +1,7 @@
import os
from collections import OrderedDict
+import deprecation
DEFAULT_INCLUDE = "include"
DEFAULT_LIB = "lib"
@@ -25,7 +26,7 @@
self.libs = [] # The libs to link against
self.defines = [] # preprocessor definitions
self.cflags = [] # pure C flags
- self.cppflags = [] # C++ compilation flags
+ self.cxxflags = [] # C++ compilation flags
self.sharedlinkflags = [] # linker flags
self.exelinkflags = [] # linker flags
self.rootpath = ""
@@ -85,6 +86,17 @@
self._res_paths = self._filter_paths(self.resdirs)
return self._res_paths
+ # Compatibility for 'cppflags' (old style property to allow decoration)
+ @deprecation.deprecated(deprecated_in="1.13", removed_in="2.0", details="Use 'cxxflags' instead")
+ def get_cppflags(self):
+ return self.cxxflags
+
+ @deprecation.deprecated(deprecated_in="1.13", removed_in="2.0", details="Use 'cxxflags' instead")
+ def set_cppflags(self, value):
+ self.cxxflags = value
+
+ cppflags = property(get_cppflags, set_cppflags)
+
class CppInfo(_CppInfo):
""" Build Information declared to be used by the CONSUMERS of a
@@ -140,7 +152,7 @@
# Note these are in reverse order
self.defines = merge_lists(dep_cpp_info.defines, self.defines)
- self.cppflags = merge_lists(dep_cpp_info.cppflags, self.cppflags)
+ self.cxxflags = merge_lists(dep_cpp_info.cxxflags, self.cxxflags)
self.cflags = merge_lists(dep_cpp_info.cflags, self.cflags)
self.sharedlinkflags = merge_lists(dep_cpp_info.sharedlinkflags, self.sharedlinkflags)
self.exelinkflags = merge_lists(dep_cpp_info.exelinkflags, self.exelinkflags)
| {"golden_diff": "diff --git a/conans/model/build_info.py b/conans/model/build_info.py\n--- a/conans/model/build_info.py\n+++ b/conans/model/build_info.py\n@@ -1,6 +1,7 @@\n import os\n from collections import OrderedDict\n \n+import deprecation\n \n DEFAULT_INCLUDE = \"include\"\n DEFAULT_LIB = \"lib\"\n@@ -25,7 +26,7 @@\n self.libs = [] # The libs to link against\n self.defines = [] # preprocessor definitions\n self.cflags = [] # pure C flags\n- self.cppflags = [] # C++ compilation flags\n+ self.cxxflags = [] # C++ compilation flags\n self.sharedlinkflags = [] # linker flags\n self.exelinkflags = [] # linker flags\n self.rootpath = \"\"\n@@ -85,6 +86,17 @@\n self._res_paths = self._filter_paths(self.resdirs)\n return self._res_paths\n \n+ # Compatibility for 'cppflags' (old style property to allow decoration)\n+ @deprecation.deprecated(deprecated_in=\"1.13\", removed_in=\"2.0\", details=\"Use 'cxxflags' instead\")\n+ def get_cppflags(self):\n+ return self.cxxflags\n+\n+ @deprecation.deprecated(deprecated_in=\"1.13\", removed_in=\"2.0\", details=\"Use 'cxxflags' instead\")\n+ def set_cppflags(self, value):\n+ self.cxxflags = value\n+\n+ cppflags = property(get_cppflags, set_cppflags)\n+\n \n class CppInfo(_CppInfo):\n \"\"\" Build Information declared to be used by the CONSUMERS of a\n@@ -140,7 +152,7 @@\n \n # Note these are in reverse order\n self.defines = merge_lists(dep_cpp_info.defines, self.defines)\n- self.cppflags = merge_lists(dep_cpp_info.cppflags, self.cppflags)\n+ self.cxxflags = merge_lists(dep_cpp_info.cxxflags, self.cxxflags)\n self.cflags = merge_lists(dep_cpp_info.cflags, self.cflags)\n self.sharedlinkflags = merge_lists(dep_cpp_info.sharedlinkflags, self.sharedlinkflags)\n self.exelinkflags = merge_lists(dep_cpp_info.exelinkflags, self.exelinkflags)\n", "issue": "Deprecation message for `cppinfo.cppflags`\nIntroduce a new alias of `cppflags` named `cxxflags` and change the docs. 
If possible, show a deprecation message when cppflags is used.\r\nThe choice of `cppflags` meaning `c++flags` was not good, because `cppflags` means preprocessor flags and it is confusing.\n", "before_files": [{"content": "import os\nfrom collections import OrderedDict\n\n\nDEFAULT_INCLUDE = \"include\"\nDEFAULT_LIB = \"lib\"\nDEFAULT_BIN = \"bin\"\nDEFAULT_RES = \"res\"\nDEFAULT_SHARE = \"share\"\n\n\nclass _CppInfo(object):\n \"\"\" Object that stores all the necessary information to build in C/C++.\n It is intended to be system independent, translation to\n specific systems will be produced from this info\n \"\"\"\n def __init__(self):\n self.includedirs = [] # Ordered list of include paths\n self.srcdirs = [] # Ordered list of source paths\n self.libdirs = [] # Directories to find libraries\n self.resdirs = [] # Directories to find resources, data, etc\n self.bindirs = [] # Directories to find executables and shared libs\n self.builddirs = []\n self.rootpaths = []\n self.libs = [] # The libs to link against\n self.defines = [] # preprocessor definitions\n self.cflags = [] # pure C flags\n self.cppflags = [] # C++ compilation flags\n self.sharedlinkflags = [] # linker flags\n self.exelinkflags = [] # linker flags\n self.rootpath = \"\"\n self.sysroot = \"\"\n self._include_paths = None\n self._lib_paths = None\n self._bin_paths = None\n self._build_paths = None\n self._res_paths = None\n self._src_paths = None\n self.version = None # Version of the conan package\n self.description = None # Description of the conan package\n # When package is editable, filter_empty=False, so empty dirs are maintained\n self.filter_empty = True\n\n def _filter_paths(self, paths):\n abs_paths = [os.path.join(self.rootpath, p)\n if not os.path.isabs(p) else p for p in paths]\n if self.filter_empty:\n return [p for p in abs_paths if os.path.isdir(p)]\n else:\n return abs_paths\n\n @property\n def include_paths(self):\n if self._include_paths is None:\n self._include_paths = self._filter_paths(self.includedirs)\n return self._include_paths\n\n @property\n def lib_paths(self):\n if self._lib_paths is None:\n self._lib_paths = self._filter_paths(self.libdirs)\n return self._lib_paths\n\n @property\n def src_paths(self):\n if self._src_paths is None:\n self._src_paths = self._filter_paths(self.srcdirs)\n return self._src_paths\n\n @property\n def bin_paths(self):\n if self._bin_paths is None:\n self._bin_paths = self._filter_paths(self.bindirs)\n return self._bin_paths\n\n @property\n def build_paths(self):\n if self._build_paths is None:\n self._build_paths = self._filter_paths(self.builddirs)\n return self._build_paths\n\n @property\n def res_paths(self):\n if self._res_paths is None:\n self._res_paths = self._filter_paths(self.resdirs)\n return self._res_paths\n\n\nclass CppInfo(_CppInfo):\n \"\"\" Build Information declared to be used by the CONSUMERS of a\n conans. 
That means that consumers must use this flags and configs i order\n to build properly.\n Defined in user CONANFILE, directories are relative at user definition time\n \"\"\"\n def __init__(self, root_folder):\n super(CppInfo, self).__init__()\n self.rootpath = root_folder # the full path of the package in which the conans is found\n self.includedirs.append(DEFAULT_INCLUDE)\n self.libdirs.append(DEFAULT_LIB)\n self.bindirs.append(DEFAULT_BIN)\n self.resdirs.append(DEFAULT_RES)\n self.builddirs.append(\"\")\n # public_deps is needed to accumulate list of deps for cmake targets\n self.public_deps = []\n self.configs = {}\n\n def __getattr__(self, config):\n\n def _get_cpp_info():\n result = _CppInfo()\n result.rootpath = self.rootpath\n result.sysroot = self.sysroot\n result.includedirs.append(DEFAULT_INCLUDE)\n result.libdirs.append(DEFAULT_LIB)\n result.bindirs.append(DEFAULT_BIN)\n result.resdirs.append(DEFAULT_RES)\n result.builddirs.append(\"\")\n return result\n\n return self.configs.setdefault(config, _get_cpp_info())\n\n\nclass _BaseDepsCppInfo(_CppInfo):\n def __init__(self):\n super(_BaseDepsCppInfo, self).__init__()\n\n def update(self, dep_cpp_info):\n\n def merge_lists(seq1, seq2):\n return [s for s in seq1 if s not in seq2] + seq2\n\n self.includedirs = merge_lists(self.includedirs, dep_cpp_info.include_paths)\n self.srcdirs = merge_lists(self.srcdirs, dep_cpp_info.src_paths)\n self.libdirs = merge_lists(self.libdirs, dep_cpp_info.lib_paths)\n self.bindirs = merge_lists(self.bindirs, dep_cpp_info.bin_paths)\n self.resdirs = merge_lists(self.resdirs, dep_cpp_info.res_paths)\n self.builddirs = merge_lists(self.builddirs, dep_cpp_info.build_paths)\n self.libs = merge_lists(self.libs, dep_cpp_info.libs)\n self.rootpaths.append(dep_cpp_info.rootpath)\n\n # Note these are in reverse order\n self.defines = merge_lists(dep_cpp_info.defines, self.defines)\n self.cppflags = merge_lists(dep_cpp_info.cppflags, self.cppflags)\n self.cflags = merge_lists(dep_cpp_info.cflags, self.cflags)\n self.sharedlinkflags = merge_lists(dep_cpp_info.sharedlinkflags, self.sharedlinkflags)\n self.exelinkflags = merge_lists(dep_cpp_info.exelinkflags, self.exelinkflags)\n\n if not self.sysroot:\n self.sysroot = dep_cpp_info.sysroot\n\n @property\n def include_paths(self):\n return self.includedirs\n\n @property\n def lib_paths(self):\n return self.libdirs\n\n @property\n def src_paths(self):\n return self.srcdirs\n\n @property\n def bin_paths(self):\n return self.bindirs\n\n @property\n def build_paths(self):\n return self.builddirs\n\n @property\n def res_paths(self):\n return self.resdirs\n\n\nclass DepsCppInfo(_BaseDepsCppInfo):\n \"\"\" Build Information necessary to build a given conans. It contains the\n flags, directories and options if its dependencies. 
The conans CONANFILE\n should use these flags to pass them to the underlaying build system (Cmake, make),\n so deps info is managed\n \"\"\"\n\n def __init__(self):\n super(DepsCppInfo, self).__init__()\n self._dependencies = OrderedDict()\n self.configs = {}\n\n def __getattr__(self, config):\n return self.configs.setdefault(config, _BaseDepsCppInfo())\n\n @property\n def dependencies(self):\n return self._dependencies.items()\n\n @property\n def deps(self):\n return self._dependencies.keys()\n\n def __getitem__(self, item):\n return self._dependencies[item]\n\n def update(self, dep_cpp_info, pkg_name):\n assert isinstance(dep_cpp_info, CppInfo)\n self._dependencies[pkg_name] = dep_cpp_info\n super(DepsCppInfo, self).update(dep_cpp_info)\n for config, cpp_info in dep_cpp_info.configs.items():\n self.configs.setdefault(config, _BaseDepsCppInfo()).update(cpp_info)\n\n def update_deps_cpp_info(self, dep_cpp_info):\n assert isinstance(dep_cpp_info, DepsCppInfo)\n for pkg_name, cpp_info in dep_cpp_info.dependencies:\n self.update(cpp_info, pkg_name)\n", "path": "conans/model/build_info.py"}], "after_files": [{"content": "import os\nfrom collections import OrderedDict\n\nimport deprecation\n\nDEFAULT_INCLUDE = \"include\"\nDEFAULT_LIB = \"lib\"\nDEFAULT_BIN = \"bin\"\nDEFAULT_RES = \"res\"\nDEFAULT_SHARE = \"share\"\n\n\nclass _CppInfo(object):\n \"\"\" Object that stores all the necessary information to build in C/C++.\n It is intended to be system independent, translation to\n specific systems will be produced from this info\n \"\"\"\n def __init__(self):\n self.includedirs = [] # Ordered list of include paths\n self.srcdirs = [] # Ordered list of source paths\n self.libdirs = [] # Directories to find libraries\n self.resdirs = [] # Directories to find resources, data, etc\n self.bindirs = [] # Directories to find executables and shared libs\n self.builddirs = []\n self.rootpaths = []\n self.libs = [] # The libs to link against\n self.defines = [] # preprocessor definitions\n self.cflags = [] # pure C flags\n self.cxxflags = [] # C++ compilation flags\n self.sharedlinkflags = [] # linker flags\n self.exelinkflags = [] # linker flags\n self.rootpath = \"\"\n self.sysroot = \"\"\n self._include_paths = None\n self._lib_paths = None\n self._bin_paths = None\n self._build_paths = None\n self._res_paths = None\n self._src_paths = None\n self.version = None # Version of the conan package\n self.description = None # Description of the conan package\n # When package is editable, filter_empty=False, so empty dirs are maintained\n self.filter_empty = True\n\n def _filter_paths(self, paths):\n abs_paths = [os.path.join(self.rootpath, p)\n if not os.path.isabs(p) else p for p in paths]\n if self.filter_empty:\n return [p for p in abs_paths if os.path.isdir(p)]\n else:\n return abs_paths\n\n @property\n def include_paths(self):\n if self._include_paths is None:\n self._include_paths = self._filter_paths(self.includedirs)\n return self._include_paths\n\n @property\n def lib_paths(self):\n if self._lib_paths is None:\n self._lib_paths = self._filter_paths(self.libdirs)\n return self._lib_paths\n\n @property\n def src_paths(self):\n if self._src_paths is None:\n self._src_paths = self._filter_paths(self.srcdirs)\n return self._src_paths\n\n @property\n def bin_paths(self):\n if self._bin_paths is None:\n self._bin_paths = self._filter_paths(self.bindirs)\n return self._bin_paths\n\n @property\n def build_paths(self):\n if self._build_paths is None:\n self._build_paths = self._filter_paths(self.builddirs)\n 
return self._build_paths\n\n @property\n def res_paths(self):\n if self._res_paths is None:\n self._res_paths = self._filter_paths(self.resdirs)\n return self._res_paths\n\n # Compatibility for 'cppflags' (old style property to allow decoration)\n @deprecation.deprecated(deprecated_in=\"1.13\", removed_in=\"2.0\", details=\"Use 'cxxflags' instead\")\n def get_cppflags(self):\n return self.cxxflags\n\n @deprecation.deprecated(deprecated_in=\"1.13\", removed_in=\"2.0\", details=\"Use 'cxxflags' instead\")\n def set_cppflags(self, value):\n self.cxxflags = value\n\n cppflags = property(get_cppflags, set_cppflags)\n\n\nclass CppInfo(_CppInfo):\n \"\"\" Build Information declared to be used by the CONSUMERS of a\n conans. That means that consumers must use this flags and configs i order\n to build properly.\n Defined in user CONANFILE, directories are relative at user definition time\n \"\"\"\n def __init__(self, root_folder):\n super(CppInfo, self).__init__()\n self.rootpath = root_folder # the full path of the package in which the conans is found\n self.includedirs.append(DEFAULT_INCLUDE)\n self.libdirs.append(DEFAULT_LIB)\n self.bindirs.append(DEFAULT_BIN)\n self.resdirs.append(DEFAULT_RES)\n self.builddirs.append(\"\")\n # public_deps is needed to accumulate list of deps for cmake targets\n self.public_deps = []\n self.configs = {}\n\n def __getattr__(self, config):\n\n def _get_cpp_info():\n result = _CppInfo()\n result.rootpath = self.rootpath\n result.sysroot = self.sysroot\n result.includedirs.append(DEFAULT_INCLUDE)\n result.libdirs.append(DEFAULT_LIB)\n result.bindirs.append(DEFAULT_BIN)\n result.resdirs.append(DEFAULT_RES)\n result.builddirs.append(\"\")\n return result\n\n return self.configs.setdefault(config, _get_cpp_info())\n\n\nclass _BaseDepsCppInfo(_CppInfo):\n def __init__(self):\n super(_BaseDepsCppInfo, self).__init__()\n\n def update(self, dep_cpp_info):\n\n def merge_lists(seq1, seq2):\n return [s for s in seq1 if s not in seq2] + seq2\n\n self.includedirs = merge_lists(self.includedirs, dep_cpp_info.include_paths)\n self.srcdirs = merge_lists(self.srcdirs, dep_cpp_info.src_paths)\n self.libdirs = merge_lists(self.libdirs, dep_cpp_info.lib_paths)\n self.bindirs = merge_lists(self.bindirs, dep_cpp_info.bin_paths)\n self.resdirs = merge_lists(self.resdirs, dep_cpp_info.res_paths)\n self.builddirs = merge_lists(self.builddirs, dep_cpp_info.build_paths)\n self.libs = merge_lists(self.libs, dep_cpp_info.libs)\n self.rootpaths.append(dep_cpp_info.rootpath)\n\n # Note these are in reverse order\n self.defines = merge_lists(dep_cpp_info.defines, self.defines)\n self.cxxflags = merge_lists(dep_cpp_info.cxxflags, self.cxxflags)\n self.cflags = merge_lists(dep_cpp_info.cflags, self.cflags)\n self.sharedlinkflags = merge_lists(dep_cpp_info.sharedlinkflags, self.sharedlinkflags)\n self.exelinkflags = merge_lists(dep_cpp_info.exelinkflags, self.exelinkflags)\n\n if not self.sysroot:\n self.sysroot = dep_cpp_info.sysroot\n\n @property\n def include_paths(self):\n return self.includedirs\n\n @property\n def lib_paths(self):\n return self.libdirs\n\n @property\n def src_paths(self):\n return self.srcdirs\n\n @property\n def bin_paths(self):\n return self.bindirs\n\n @property\n def build_paths(self):\n return self.builddirs\n\n @property\n def res_paths(self):\n return self.resdirs\n\n\nclass DepsCppInfo(_BaseDepsCppInfo):\n \"\"\" Build Information necessary to build a given conans. It contains the\n flags, directories and options if its dependencies. 
The conans CONANFILE\n should use these flags to pass them to the underlaying build system (Cmake, make),\n so deps info is managed\n \"\"\"\n\n def __init__(self):\n super(DepsCppInfo, self).__init__()\n self._dependencies = OrderedDict()\n self.configs = {}\n\n def __getattr__(self, config):\n return self.configs.setdefault(config, _BaseDepsCppInfo())\n\n @property\n def dependencies(self):\n return self._dependencies.items()\n\n @property\n def deps(self):\n return self._dependencies.keys()\n\n def __getitem__(self, item):\n return self._dependencies[item]\n\n def update(self, dep_cpp_info, pkg_name):\n assert isinstance(dep_cpp_info, CppInfo)\n self._dependencies[pkg_name] = dep_cpp_info\n super(DepsCppInfo, self).update(dep_cpp_info)\n for config, cpp_info in dep_cpp_info.configs.items():\n self.configs.setdefault(config, _BaseDepsCppInfo()).update(cpp_info)\n\n def update_deps_cpp_info(self, dep_cpp_info):\n assert isinstance(dep_cpp_info, DepsCppInfo)\n for pkg_name, cpp_info in dep_cpp_info.dependencies:\n self.update(cpp_info, pkg_name)\n", "path": "conans/model/build_info.py"}]} | 2,592 | 512 |
gh_patches_debug_14629 | rasdani/github-patches | git_diff | biolab__orange3-2608 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Names of models are not saved as settings
1. Put logistic regression on the canvas.
2. Edit its name
3. Save the schema
4. Close canvas
5. Open Orange and load the schema
The logistic regression name is reverted back to Logistic regression.
--------
The base class for learners sets `self.learner_name = self.name` in `__init__`, where `self.name` is the widget's class attribute `name`, which is set later in the derived classes. So `learner_name = Setting(name)` or `learner_name = Setting(OWWidget.name)` won't work since the name is only defined later. Having `learner_name = Setting("")` would require some magic.
`OWBaseLearner` already has a metaclass. This may be the point where the setting can be properly initialized since the `name` is already set then.
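A trimmed-down sketch of a workaround that avoids the ordering problem without metaclass magic — declare the setting with a `None` sentinel, mark it `schema_only` so the value travels with the saved workflow rather than the widget's shared defaults, and resolve it against the class-level `name` in `__init__`; this mirrors the patch shown further down in this entry (metaclass and other machinery omitted):

```python
from Orange.widgets.settings import Setting
from Orange.widgets.widget import OWWidget


class OWBaseLearner(OWWidget):
    # `name` is only defined by concrete subclasses, so the setting cannot
    # default to it at class-definition time; use None as a sentinel instead.
    learner_name = Setting(None, schema_only=True)

    def __init__(self):
        super().__init__()
        if self.learner_name is None:   # nothing was restored from a saved schema
            self.learner_name = self.name
```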
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Orange/widgets/utils/owlearnerwidget.py`
Content:
```
1 from copy import deepcopy
2
3 import itertools
4 import numpy as np
5
6 from AnyQt.QtCore import QTimer, Qt
7
8 from Orange.data import Table
9 from Orange.modelling import Fitter, Learner, Model
10 from Orange.preprocess.preprocess import Preprocess
11 from Orange.widgets import gui
12 from Orange.widgets.settings import Setting
13 from Orange.widgets.utils import getmembers
14 from Orange.widgets.utils.signals import Output, Input
15 from Orange.widgets.utils.sql import check_sql_input
16 from Orange.widgets.widget import OWWidget, WidgetMetaClass, Msg
17
18
19 class OWBaseLearnerMeta(WidgetMetaClass):
20 """ Meta class for learner widgets
21
22 OWBaseLearner declares two outputs, learner and model with
23 generic type (Learner and Model).
24
25 This metaclass ensures that each of the subclasses gets
26 its own Outputs class with output that match the corresponding
27 learner.
28 """
29 def __new__(cls, name, bases, attributes):
30 def abstract_widget():
31 return not attributes.get("name")
32
33 def copy_outputs(template):
34 result = type("Outputs", (), {})
35 for name, signal in getmembers(template, Output):
36 setattr(result, name, deepcopy(signal))
37 return result
38
39 obj = super().__new__(cls, name, bases, attributes)
40 if abstract_widget():
41 return obj
42
43 learner = attributes.get("LEARNER")
44 if not learner:
45 raise AttributeError(
46 "'{}' must declare attribute LEARNER".format(name))
47
48 outputs = obj.Outputs = copy_outputs(obj.Outputs)
49 outputs.learner.type = learner
50 outputs.model.type = learner.__returns__
51
52 return obj
53
54
55 class OWBaseLearner(OWWidget, metaclass=OWBaseLearnerMeta):
56 """Abstract widget for classification/regression learners.
57
58 Notes
59 -----
60 All learner widgets should define learner class LEARNER.
61 LEARNER should have __returns__ attribute.
62
63 Overwrite `create_learner`, `add_main_layout` and `get_learner_parameters`
64 in case LEARNER has extra parameters.
65
66 """
67 LEARNER = None
68 supports_sparse = True
69
70 want_main_area = False
71 resizing_enabled = False
72 auto_apply = Setting(True)
73
74 class Error(OWWidget.Error):
75 data_error = Msg("{}")
76 fitting_failed = Msg("Fitting failed.\n{}")
77 sparse_not_supported = Msg("Sparse data is not supported.")
78 out_of_memory = Msg("Out of memory.")
79
80 class Warning(OWWidget.Warning):
81 outdated_learner = Msg("Press Apply to submit changes.")
82
83 class Inputs:
84 data = Input("Data", Table)
85 preprocessor = Input("Preprocessor", Preprocess)
86
87 class Outputs:
88 learner = Output("Learner", Learner, dynamic=False)
89 model = Output("Model", Model, dynamic=False,
90 replaces=["Classifier", "Predictor"])
91
92 OUTPUT_MODEL_NAME = Outputs.model.name # Attr for backcompat w/ self.send() code
93
94 def __init__(self):
95 super().__init__()
96 self.data = None
97 self.valid_data = False
98 self.learner = None
99 self.learner_name = self.name
100 self.model = None
101 self.preprocessors = None
102 self.outdated_settings = False
103
104 self.setup_layout()
105 QTimer.singleShot(0, getattr(self, "unconditional_apply", self.apply))
106
107 def create_learner(self):
108 """Creates a learner with current configuration.
109
110 Returns:
111 Learner: an instance of Orange.base.learner subclass.
112 """
113 return self.LEARNER(preprocessors=self.preprocessors)
114
115 def get_learner_parameters(self):
116 """Creates an `OrderedDict` or a sequence of pairs with current model
117 configuration.
118
119 Returns:
120 OrderedDict or List: (option, value) pairs or dict
121 """
122 return []
123
124 @Inputs.preprocessor
125 def set_preprocessor(self, preprocessor):
126 self.preprocessors = preprocessor
127 self.apply()
128
129 @Inputs.data
130 @check_sql_input
131 def set_data(self, data):
132 """Set the input train data set."""
133 self.Error.data_error.clear()
134 self.data = data
135 if data is not None and data.domain.class_var is None:
136 self.Error.data_error("Data has no target variable.")
137 self.data = None
138
139 self.update_model()
140
141 def apply(self):
142 """Applies learner and sends new model."""
143 self.update_learner()
144 self.update_model()
145
146 def update_learner(self):
147 self.learner = self.create_learner()
148 if self.learner and issubclass(self.LEARNER, Fitter):
149 self.learner.use_default_preprocessors = True
150 if self.learner is not None:
151 self.learner.name = self.learner_name
152 self.Outputs.learner.send(self.learner)
153 self.outdated_settings = False
154 self.Warning.outdated_learner.clear()
155
156 def show_fitting_failed(self, exc):
157 """Show error when fitting fails.
158 Derived widgets can override this to show more specific messages."""
159 self.Error.fitting_failed(str(exc), shown=exc is not None)
160
161 def update_model(self):
162 self.show_fitting_failed(None)
163 self.model = None
164 if self.check_data():
165 try:
166 self.model = self.learner(self.data)
167 except BaseException as exc:
168 self.show_fitting_failed(exc)
169 else:
170 self.model.name = self.learner_name
171 self.model.instances = self.data
172 self.Outputs.model.send(self.model)
173
174 def check_data(self):
175 self.valid_data = False
176 self.Error.sparse_not_supported.clear()
177 if self.data is not None and self.learner is not None:
178 self.Error.data_error.clear()
179 if not self.learner.check_learner_adequacy(self.data.domain):
180 self.Error.data_error(self.learner.learner_adequacy_err_msg)
181 elif not len(self.data):
182 self.Error.data_error("Data set is empty.")
183 elif len(np.unique(self.data.Y)) < 2:
184 self.Error.data_error("Data contains a single target value.")
185 elif self.data.X.size == 0:
186 self.Error.data_error("Data has no features to learn from.")
187 elif self.data.is_sparse() and not self.supports_sparse:
188 self.Error.sparse_not_supported()
189 else:
190 self.valid_data = True
191 return self.valid_data
192
193 def settings_changed(self, *args, **kwargs):
194 self.outdated_settings = True
195 self.Warning.outdated_learner(shown=not self.auto_apply)
196 self.apply()
197
198 def _change_name(self, instance, output):
199 if instance:
200 instance.name = self.learner_name
201 if self.auto_apply:
202 output.send(instance)
203
204 def learner_name_changed(self):
205 self._change_name(self.learner, self.Outputs.learner)
206 self._change_name(self.model, self.Outputs.model)
207
208 def send_report(self):
209 self.report_items((("Name", self.learner_name),))
210
211 model_parameters = self.get_learner_parameters()
212 if model_parameters:
213 self.report_items("Model parameters", model_parameters)
214
215 if self.data:
216 self.report_data("Data", self.data)
217
218 # GUI
219 def setup_layout(self):
220 self.add_learner_name_widget()
221 self.add_main_layout()
222 # Options specific to target variable type, if supported
223 if issubclass(self.LEARNER, Fitter):
224 # Only add a classification section if the method is overridden
225 if type(self).add_classification_layout is not \
226 OWBaseLearner.add_classification_layout:
227 classification_box = gui.widgetBox(
228 self.controlArea, 'Classification')
229 self.add_classification_layout(classification_box)
230 # Only add a regression section if the method is overridden
231 if type(self).add_regression_layout is not \
232 OWBaseLearner.add_regression_layout:
233 regression_box = gui.widgetBox(self.controlArea, 'Regression')
234 self.add_regression_layout(regression_box)
235 self.add_bottom_buttons()
236
237 def add_main_layout(self):
238 """Creates layout with the learner configuration widgets.
239
240 Override this method for laying out any learner-specific parameter controls.
241 See setup_layout() method for execution order.
242 """
243 pass
244
245 def add_classification_layout(self, box):
246 """Creates layout for classification specific options.
247
248 If a widget outputs a learner dispatcher, sometimes the classification
249 and regression learners require different options.
250 See `setup_layout()` method for execution order.
251 """
252 pass
253
254 def add_regression_layout(self, box):
255 """Creates layout for regression specific options.
256
257 If a widget outputs a learner dispatcher, sometimes the classification
258 and regression learners require different options.
259 See `setup_layout()` method for execution order.
260 """
261 pass
262
263 def add_learner_name_widget(self):
264 self.name_line_edit = gui.lineEdit(
265 self.controlArea, self, 'learner_name', box='Name',
266 tooltip='The name will identify this model in other widgets',
267 orientation=Qt.Horizontal, callback=self.learner_name_changed)
268
269 def add_bottom_buttons(self):
270 box = gui.hBox(self.controlArea, True)
271 box.layout().addWidget(self.report_button)
272 gui.separator(box, 15)
273 self.apply_button = gui.auto_commit(box, self, 'auto_apply', '&Apply',
274 box=False, commit=self.apply)
275
276 def send(self, signalName, value, id=None):
277 # A subclass might still use the old syntax to send outputs
278 # defined on this class
279 for _, output in getmembers(self.Outputs, Output):
280 if output.name == signalName or signalName in output.replaces:
281 output.send(value, id=id)
282 return
283
284 super().send(signalName, value, id)
285
286 @classmethod
287 def get_widget_description(cls):
288 # When a subclass defines defines old-style signals, those override
289 # the new-style ones, so we add them manually
290 desc = super().get_widget_description()
291
292 if cls.outputs:
293 desc["outputs"].extend(cls.get_signals("outputs", True))
294 if cls.inputs:
295 desc["inputs"].extend(cls.get_signals("inputs", True))
296 return desc
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Orange/widgets/utils/owlearnerwidget.py b/Orange/widgets/utils/owlearnerwidget.py
--- a/Orange/widgets/utils/owlearnerwidget.py
+++ b/Orange/widgets/utils/owlearnerwidget.py
@@ -67,6 +67,7 @@
LEARNER = None
supports_sparse = True
+ learner_name = Setting(None, schema_only=True)
want_main_area = False
resizing_enabled = False
auto_apply = Setting(True)
@@ -96,7 +97,8 @@
self.data = None
self.valid_data = False
self.learner = None
- self.learner_name = self.name
+ if self.learner_name is None:
+ self.learner_name = self.name
self.model = None
self.preprocessors = None
self.outdated_settings = False
| {"golden_diff": "diff --git a/Orange/widgets/utils/owlearnerwidget.py b/Orange/widgets/utils/owlearnerwidget.py\n--- a/Orange/widgets/utils/owlearnerwidget.py\n+++ b/Orange/widgets/utils/owlearnerwidget.py\n@@ -67,6 +67,7 @@\n LEARNER = None\n supports_sparse = True\n \n+ learner_name = Setting(None, schema_only=True)\n want_main_area = False\n resizing_enabled = False\n auto_apply = Setting(True)\n@@ -96,7 +97,8 @@\n self.data = None\n self.valid_data = False\n self.learner = None\n- self.learner_name = self.name\n+ if self.learner_name is None:\n+ self.learner_name = self.name\n self.model = None\n self.preprocessors = None\n self.outdated_settings = False\n", "issue": "Names of models are not saved as settings\n1. Put logistic regression on the canvas.\r\n2. Edit its name\r\n3. Save the schema\r\n4. Close canvas\r\n5. Open Orange and load the schema\r\n\r\nThe logistic regression name is reverted back to Logistic regression.\r\n\r\n--------\r\n\r\nThe base class for learners sets `self.learner_name = self.name` in `__init__`, where `self.name` is the widgets class attribute `name`, which is set later in the derived classes. So `learner_name = Setting(name)` or `learner_name = Setting(OWWidget.name)` won't work since the name is only defined later. Having `learner_name = Setting(\"\")` would require some magic.\r\n\r\n`OWBaseLearner` already has a meta class. This may be the point where the setting can be properly initialized since the `name` is already set then.\n", "before_files": [{"content": "from copy import deepcopy\n\nimport itertools\nimport numpy as np\n\nfrom AnyQt.QtCore import QTimer, Qt\n\nfrom Orange.data import Table\nfrom Orange.modelling import Fitter, Learner, Model\nfrom Orange.preprocess.preprocess import Preprocess\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.utils import getmembers\nfrom Orange.widgets.utils.signals import Output, Input\nfrom Orange.widgets.utils.sql import check_sql_input\nfrom Orange.widgets.widget import OWWidget, WidgetMetaClass, Msg\n\n\nclass OWBaseLearnerMeta(WidgetMetaClass):\n \"\"\" Meta class for learner widgets\n\n OWBaseLearner declares two outputs, learner and model with\n generic type (Learner and Model).\n\n This metaclass ensures that each of the subclasses gets\n its own Outputs class with output that match the corresponding\n learner.\n \"\"\"\n def __new__(cls, name, bases, attributes):\n def abstract_widget():\n return not attributes.get(\"name\")\n\n def copy_outputs(template):\n result = type(\"Outputs\", (), {})\n for name, signal in getmembers(template, Output):\n setattr(result, name, deepcopy(signal))\n return result\n\n obj = super().__new__(cls, name, bases, attributes)\n if abstract_widget():\n return obj\n\n learner = attributes.get(\"LEARNER\")\n if not learner:\n raise AttributeError(\n \"'{}' must declare attribute LEARNER\".format(name))\n\n outputs = obj.Outputs = copy_outputs(obj.Outputs)\n outputs.learner.type = learner\n outputs.model.type = learner.__returns__\n\n return obj\n\n\nclass OWBaseLearner(OWWidget, metaclass=OWBaseLearnerMeta):\n \"\"\"Abstract widget for classification/regression learners.\n\n Notes\n -----\n All learner widgets should define learner class LEARNER.\n LEARNER should have __returns__ attribute.\n\n Overwrite `create_learner`, `add_main_layout` and `get_learner_parameters`\n in case LEARNER has extra parameters.\n\n \"\"\"\n LEARNER = None\n supports_sparse = True\n\n want_main_area = False\n resizing_enabled = False\n auto_apply 
= Setting(True)\n\n class Error(OWWidget.Error):\n data_error = Msg(\"{}\")\n fitting_failed = Msg(\"Fitting failed.\\n{}\")\n sparse_not_supported = Msg(\"Sparse data is not supported.\")\n out_of_memory = Msg(\"Out of memory.\")\n\n class Warning(OWWidget.Warning):\n outdated_learner = Msg(\"Press Apply to submit changes.\")\n\n class Inputs:\n data = Input(\"Data\", Table)\n preprocessor = Input(\"Preprocessor\", Preprocess)\n\n class Outputs:\n learner = Output(\"Learner\", Learner, dynamic=False)\n model = Output(\"Model\", Model, dynamic=False,\n replaces=[\"Classifier\", \"Predictor\"])\n\n OUTPUT_MODEL_NAME = Outputs.model.name # Attr for backcompat w/ self.send() code\n\n def __init__(self):\n super().__init__()\n self.data = None\n self.valid_data = False\n self.learner = None\n self.learner_name = self.name\n self.model = None\n self.preprocessors = None\n self.outdated_settings = False\n\n self.setup_layout()\n QTimer.singleShot(0, getattr(self, \"unconditional_apply\", self.apply))\n\n def create_learner(self):\n \"\"\"Creates a learner with current configuration.\n\n Returns:\n Learner: an instance of Orange.base.learner subclass.\n \"\"\"\n return self.LEARNER(preprocessors=self.preprocessors)\n\n def get_learner_parameters(self):\n \"\"\"Creates an `OrderedDict` or a sequence of pairs with current model\n configuration.\n\n Returns:\n OrderedDict or List: (option, value) pairs or dict\n \"\"\"\n return []\n\n @Inputs.preprocessor\n def set_preprocessor(self, preprocessor):\n self.preprocessors = preprocessor\n self.apply()\n\n @Inputs.data\n @check_sql_input\n def set_data(self, data):\n \"\"\"Set the input train data set.\"\"\"\n self.Error.data_error.clear()\n self.data = data\n if data is not None and data.domain.class_var is None:\n self.Error.data_error(\"Data has no target variable.\")\n self.data = None\n\n self.update_model()\n\n def apply(self):\n \"\"\"Applies learner and sends new model.\"\"\"\n self.update_learner()\n self.update_model()\n\n def update_learner(self):\n self.learner = self.create_learner()\n if self.learner and issubclass(self.LEARNER, Fitter):\n self.learner.use_default_preprocessors = True\n if self.learner is not None:\n self.learner.name = self.learner_name\n self.Outputs.learner.send(self.learner)\n self.outdated_settings = False\n self.Warning.outdated_learner.clear()\n\n def show_fitting_failed(self, exc):\n \"\"\"Show error when fitting fails.\n Derived widgets can override this to show more specific messages.\"\"\"\n self.Error.fitting_failed(str(exc), shown=exc is not None)\n\n def update_model(self):\n self.show_fitting_failed(None)\n self.model = None\n if self.check_data():\n try:\n self.model = self.learner(self.data)\n except BaseException as exc:\n self.show_fitting_failed(exc)\n else:\n self.model.name = self.learner_name\n self.model.instances = self.data\n self.Outputs.model.send(self.model)\n\n def check_data(self):\n self.valid_data = False\n self.Error.sparse_not_supported.clear()\n if self.data is not None and self.learner is not None:\n self.Error.data_error.clear()\n if not self.learner.check_learner_adequacy(self.data.domain):\n self.Error.data_error(self.learner.learner_adequacy_err_msg)\n elif not len(self.data):\n self.Error.data_error(\"Data set is empty.\")\n elif len(np.unique(self.data.Y)) < 2:\n self.Error.data_error(\"Data contains a single target value.\")\n elif self.data.X.size == 0:\n self.Error.data_error(\"Data has no features to learn from.\")\n elif self.data.is_sparse() and not self.supports_sparse:\n 
self.Error.sparse_not_supported()\n else:\n self.valid_data = True\n return self.valid_data\n\n def settings_changed(self, *args, **kwargs):\n self.outdated_settings = True\n self.Warning.outdated_learner(shown=not self.auto_apply)\n self.apply()\n\n def _change_name(self, instance, output):\n if instance:\n instance.name = self.learner_name\n if self.auto_apply:\n output.send(instance)\n\n def learner_name_changed(self):\n self._change_name(self.learner, self.Outputs.learner)\n self._change_name(self.model, self.Outputs.model)\n\n def send_report(self):\n self.report_items(((\"Name\", self.learner_name),))\n\n model_parameters = self.get_learner_parameters()\n if model_parameters:\n self.report_items(\"Model parameters\", model_parameters)\n\n if self.data:\n self.report_data(\"Data\", self.data)\n\n # GUI\n def setup_layout(self):\n self.add_learner_name_widget()\n self.add_main_layout()\n # Options specific to target variable type, if supported\n if issubclass(self.LEARNER, Fitter):\n # Only add a classification section if the method is overridden\n if type(self).add_classification_layout is not \\\n OWBaseLearner.add_classification_layout:\n classification_box = gui.widgetBox(\n self.controlArea, 'Classification')\n self.add_classification_layout(classification_box)\n # Only add a regression section if the method is overridden\n if type(self).add_regression_layout is not \\\n OWBaseLearner.add_regression_layout:\n regression_box = gui.widgetBox(self.controlArea, 'Regression')\n self.add_regression_layout(regression_box)\n self.add_bottom_buttons()\n\n def add_main_layout(self):\n \"\"\"Creates layout with the learner configuration widgets.\n\n Override this method for laying out any learner-specific parameter controls.\n See setup_layout() method for execution order.\n \"\"\"\n pass\n\n def add_classification_layout(self, box):\n \"\"\"Creates layout for classification specific options.\n\n If a widget outputs a learner dispatcher, sometimes the classification\n and regression learners require different options.\n See `setup_layout()` method for execution order.\n \"\"\"\n pass\n\n def add_regression_layout(self, box):\n \"\"\"Creates layout for regression specific options.\n\n If a widget outputs a learner dispatcher, sometimes the classification\n and regression learners require different options.\n See `setup_layout()` method for execution order.\n \"\"\"\n pass\n\n def add_learner_name_widget(self):\n self.name_line_edit = gui.lineEdit(\n self.controlArea, self, 'learner_name', box='Name',\n tooltip='The name will identify this model in other widgets',\n orientation=Qt.Horizontal, callback=self.learner_name_changed)\n\n def add_bottom_buttons(self):\n box = gui.hBox(self.controlArea, True)\n box.layout().addWidget(self.report_button)\n gui.separator(box, 15)\n self.apply_button = gui.auto_commit(box, self, 'auto_apply', '&Apply',\n box=False, commit=self.apply)\n\n def send(self, signalName, value, id=None):\n # A subclass might still use the old syntax to send outputs\n # defined on this class\n for _, output in getmembers(self.Outputs, Output):\n if output.name == signalName or signalName in output.replaces:\n output.send(value, id=id)\n return\n\n super().send(signalName, value, id)\n\n @classmethod\n def get_widget_description(cls):\n # When a subclass defines defines old-style signals, those override\n # the new-style ones, so we add them manually\n desc = super().get_widget_description()\n\n if cls.outputs:\n desc[\"outputs\"].extend(cls.get_signals(\"outputs\", True))\n if 
cls.inputs:\n desc[\"inputs\"].extend(cls.get_signals(\"inputs\", True))\n return desc\n", "path": "Orange/widgets/utils/owlearnerwidget.py"}], "after_files": [{"content": "from copy import deepcopy\n\nimport itertools\nimport numpy as np\n\nfrom AnyQt.QtCore import QTimer, Qt\n\nfrom Orange.data import Table\nfrom Orange.modelling import Fitter, Learner, Model\nfrom Orange.preprocess.preprocess import Preprocess\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.utils import getmembers\nfrom Orange.widgets.utils.signals import Output, Input\nfrom Orange.widgets.utils.sql import check_sql_input\nfrom Orange.widgets.widget import OWWidget, WidgetMetaClass, Msg\n\n\nclass OWBaseLearnerMeta(WidgetMetaClass):\n \"\"\" Meta class for learner widgets\n\n OWBaseLearner declares two outputs, learner and model with\n generic type (Learner and Model).\n\n This metaclass ensures that each of the subclasses gets\n its own Outputs class with output that match the corresponding\n learner.\n \"\"\"\n def __new__(cls, name, bases, attributes):\n def abstract_widget():\n return not attributes.get(\"name\")\n\n def copy_outputs(template):\n result = type(\"Outputs\", (), {})\n for name, signal in getmembers(template, Output):\n setattr(result, name, deepcopy(signal))\n return result\n\n obj = super().__new__(cls, name, bases, attributes)\n if abstract_widget():\n return obj\n\n learner = attributes.get(\"LEARNER\")\n if not learner:\n raise AttributeError(\n \"'{}' must declare attribute LEARNER\".format(name))\n\n outputs = obj.Outputs = copy_outputs(obj.Outputs)\n outputs.learner.type = learner\n outputs.model.type = learner.__returns__\n\n return obj\n\n\nclass OWBaseLearner(OWWidget, metaclass=OWBaseLearnerMeta):\n \"\"\"Abstract widget for classification/regression learners.\n\n Notes\n -----\n All learner widgets should define learner class LEARNER.\n LEARNER should have __returns__ attribute.\n\n Overwrite `create_learner`, `add_main_layout` and `get_learner_parameters`\n in case LEARNER has extra parameters.\n\n \"\"\"\n LEARNER = None\n supports_sparse = True\n\n learner_name = Setting(None, schema_only=True)\n want_main_area = False\n resizing_enabled = False\n auto_apply = Setting(True)\n\n class Error(OWWidget.Error):\n data_error = Msg(\"{}\")\n fitting_failed = Msg(\"Fitting failed.\\n{}\")\n sparse_not_supported = Msg(\"Sparse data is not supported.\")\n out_of_memory = Msg(\"Out of memory.\")\n\n class Warning(OWWidget.Warning):\n outdated_learner = Msg(\"Press Apply to submit changes.\")\n\n class Inputs:\n data = Input(\"Data\", Table)\n preprocessor = Input(\"Preprocessor\", Preprocess)\n\n class Outputs:\n learner = Output(\"Learner\", Learner, dynamic=False)\n model = Output(\"Model\", Model, dynamic=False,\n replaces=[\"Classifier\", \"Predictor\"])\n\n OUTPUT_MODEL_NAME = Outputs.model.name # Attr for backcompat w/ self.send() code\n\n def __init__(self):\n super().__init__()\n self.data = None\n self.valid_data = False\n self.learner = None\n if self.learner_name is None:\n self.learner_name = self.name\n self.model = None\n self.preprocessors = None\n self.outdated_settings = False\n\n self.setup_layout()\n QTimer.singleShot(0, getattr(self, \"unconditional_apply\", self.apply))\n\n def create_learner(self):\n \"\"\"Creates a learner with current configuration.\n\n Returns:\n Learner: an instance of Orange.base.learner subclass.\n \"\"\"\n return self.LEARNER(preprocessors=self.preprocessors)\n\n def get_learner_parameters(self):\n 
\"\"\"Creates an `OrderedDict` or a sequence of pairs with current model\n configuration.\n\n Returns:\n OrderedDict or List: (option, value) pairs or dict\n \"\"\"\n return []\n\n @Inputs.preprocessor\n def set_preprocessor(self, preprocessor):\n self.preprocessors = preprocessor\n self.apply()\n\n @Inputs.data\n @check_sql_input\n def set_data(self, data):\n \"\"\"Set the input train data set.\"\"\"\n self.Error.data_error.clear()\n self.data = data\n if data is not None and data.domain.class_var is None:\n self.Error.data_error(\"Data has no target variable.\")\n self.data = None\n\n self.update_model()\n\n def apply(self):\n \"\"\"Applies learner and sends new model.\"\"\"\n self.update_learner()\n self.update_model()\n\n def update_learner(self):\n self.learner = self.create_learner()\n if self.learner and issubclass(self.LEARNER, Fitter):\n self.learner.use_default_preprocessors = True\n if self.learner is not None:\n self.learner.name = self.learner_name\n self.Outputs.learner.send(self.learner)\n self.outdated_settings = False\n self.Warning.outdated_learner.clear()\n\n def show_fitting_failed(self, exc):\n \"\"\"Show error when fitting fails.\n Derived widgets can override this to show more specific messages.\"\"\"\n self.Error.fitting_failed(str(exc), shown=exc is not None)\n\n def update_model(self):\n self.show_fitting_failed(None)\n self.model = None\n if self.check_data():\n try:\n self.model = self.learner(self.data)\n except BaseException as exc:\n self.show_fitting_failed(exc)\n else:\n self.model.name = self.learner_name\n self.model.instances = self.data\n self.Outputs.model.send(self.model)\n\n def check_data(self):\n self.valid_data = False\n self.Error.sparse_not_supported.clear()\n if self.data is not None and self.learner is not None:\n self.Error.data_error.clear()\n if not self.learner.check_learner_adequacy(self.data.domain):\n self.Error.data_error(self.learner.learner_adequacy_err_msg)\n elif not len(self.data):\n self.Error.data_error(\"Data set is empty.\")\n elif len(np.unique(self.data.Y)) < 2:\n self.Error.data_error(\"Data contains a single target value.\")\n elif self.data.X.size == 0:\n self.Error.data_error(\"Data has no features to learn from.\")\n elif self.data.is_sparse() and not self.supports_sparse:\n self.Error.sparse_not_supported()\n else:\n self.valid_data = True\n return self.valid_data\n\n def settings_changed(self, *args, **kwargs):\n self.outdated_settings = True\n self.Warning.outdated_learner(shown=not self.auto_apply)\n self.apply()\n\n def _change_name(self, instance, output):\n if instance:\n instance.name = self.learner_name\n if self.auto_apply:\n output.send(instance)\n\n def learner_name_changed(self):\n self._change_name(self.learner, self.Outputs.learner)\n self._change_name(self.model, self.Outputs.model)\n\n def send_report(self):\n self.report_items(((\"Name\", self.learner_name),))\n\n model_parameters = self.get_learner_parameters()\n if model_parameters:\n self.report_items(\"Model parameters\", model_parameters)\n\n if self.data:\n self.report_data(\"Data\", self.data)\n\n # GUI\n def setup_layout(self):\n self.add_learner_name_widget()\n self.add_main_layout()\n # Options specific to target variable type, if supported\n if issubclass(self.LEARNER, Fitter):\n # Only add a classification section if the method is overridden\n if type(self).add_classification_layout is not \\\n OWBaseLearner.add_classification_layout:\n classification_box = gui.widgetBox(\n self.controlArea, 'Classification')\n 
self.add_classification_layout(classification_box)\n # Only add a regression section if the method is overridden\n if type(self).add_regression_layout is not \\\n OWBaseLearner.add_regression_layout:\n regression_box = gui.widgetBox(self.controlArea, 'Regression')\n self.add_regression_layout(regression_box)\n self.add_bottom_buttons()\n\n def add_main_layout(self):\n \"\"\"Creates layout with the learner configuration widgets.\n\n Override this method for laying out any learner-specific parameter controls.\n See setup_layout() method for execution order.\n \"\"\"\n pass\n\n def add_classification_layout(self, box):\n \"\"\"Creates layout for classification specific options.\n\n If a widget outputs a learner dispatcher, sometimes the classification\n and regression learners require different options.\n See `setup_layout()` method for execution order.\n \"\"\"\n pass\n\n def add_regression_layout(self, box):\n \"\"\"Creates layout for regression specific options.\n\n If a widget outputs a learner dispatcher, sometimes the classification\n and regression learners require different options.\n See `setup_layout()` method for execution order.\n \"\"\"\n pass\n\n def add_learner_name_widget(self):\n self.name_line_edit = gui.lineEdit(\n self.controlArea, self, 'learner_name', box='Name',\n tooltip='The name will identify this model in other widgets',\n orientation=Qt.Horizontal, callback=self.learner_name_changed)\n\n def add_bottom_buttons(self):\n box = gui.hBox(self.controlArea, True)\n box.layout().addWidget(self.report_button)\n gui.separator(box, 15)\n self.apply_button = gui.auto_commit(box, self, 'auto_apply', '&Apply',\n box=False, commit=self.apply)\n\n def send(self, signalName, value, id=None):\n # A subclass might still use the old syntax to send outputs\n # defined on this class\n for _, output in getmembers(self.Outputs, Output):\n if output.name == signalName or signalName in output.replaces:\n output.send(value, id=id)\n return\n\n super().send(signalName, value, id)\n\n @classmethod\n def get_widget_description(cls):\n # When a subclass defines defines old-style signals, those override\n # the new-style ones, so we add them manually\n desc = super().get_widget_description()\n\n if cls.outputs:\n desc[\"outputs\"].extend(cls.get_signals(\"outputs\", True))\n if cls.inputs:\n desc[\"inputs\"].extend(cls.get_signals(\"inputs\", True))\n return desc\n", "path": "Orange/widgets/utils/owlearnerwidget.py"}]} | 3,407 | 188 |
gh_patches_debug_37684 | rasdani/github-patches | git_diff | pulp__pulpcore-4335 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need a way to directly migrate pulp_labels from 3.21 to 3.28
**Is your feature request related to a problem? Please describe.**
As RHUI developers, we are heavy users of pulp_labels on repository objects. As the current 4.5 release is on pulpcore 3.21, we would like our next release (4.6) to be based on pulpcore 3.28, and we need to ensure a smooth upgrade path for our customers. As it is currently laid out, the 3.21 to 3.28 migration path requires an intermediate step (<3.25) where the `pulpcore-manager datarepair-labels` command is available.
**Describe the solution you'd like**
The ideal solution would be to make Django migrations take care of that automatically, without requiring an intermediate step/release. Since we live in a non-ideal world, a solution where an additional command like `datarepair-labels` is available would also suffice.
**Describe alternatives you've considered**
We have considered an intermediate release of RHUI 4.6 with pulpcore 3.22 followed by RHUI 4.7 built on 3.28, but it would create a lot of migration issues for our customers, with many of them still running RHUI 4.0.
--- END ISSUE ---
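For readers unfamiliar with Django data migrations, the "automatic" path requested above generally takes the shape of a `RunPython` step that copies the old rows forward before the old table is dropped. The sketch below is an editorial illustration only; the model and field names (`Label`, `Repository`, `pulp_labels`, `object_id`) are assumptions standing in for pulpcore's real schema, and a real migration would also match on content type and cover remotes/distributions.

```python
# Illustrative Django data-migration skeleton (assumed model/field names,
# not pulpcore's actual schema): copy old label rows into a new field
# before the old model is deleted in the same migration.
from django.db import migrations


def copy_labels_forward(apps, schema_editor):
    # Use historical models from the app registry so the migration
    # stays valid as the codebase evolves.
    OldLabel = apps.get_model("core", "Label")          # assumed
    Repository = apps.get_model("core", "Repository")   # assumed
    for label in OldLabel.objects.iterator():
        repo = Repository.objects.filter(pk=label.object_id).first()
        if repo is None:
            continue
        labels = repo.pulp_labels or {}                  # assumed HStore-like field
        labels[label.key] = label.value
        repo.pulp_labels = labels
        repo.save(update_fields=["pulp_labels"])


class Migration(migrations.Migration):
    dependencies = [("core", "0103_alter_export_task")]
    operations = [
        migrations.RunPython(copy_labels_forward, migrations.RunPython.noop),
        # ... the DeleteModel('Label') step would follow here ...
    ]
```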
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/migrations/0104_delete_label.py`
Content:
```
1 # Generated by Django 3.2.18 on 2023-05-04 07:06
2
3 from django.db import migrations
4
5
6 def check_no_existing_labels(apps, schema_editor):
7 Label = apps.get_model("core", "Label")
8 if Label.objects.exists():
9 raise RuntimeError(
10 "There are remaining labels. Please revert to pulpcore<3.25 and make sure all labels are properly mirgated or deleted."
11 )
12
13
14 class Migration(migrations.Migration):
15
16 dependencies = [
17 ('core', '0103_alter_export_task'),
18 ]
19
20 operations = [
21 migrations.RunPython(
22 code=check_no_existing_labels,
23 reverse_code=migrations.RunPython.noop,
24 elidable=True,
25 ),
26 migrations.DeleteModel(
27 name='Label',
28 ),
29 ]
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/migrations/0104_delete_label.py b/pulpcore/app/migrations/0104_delete_label.py
--- a/pulpcore/app/migrations/0104_delete_label.py
+++ b/pulpcore/app/migrations/0104_delete_label.py
@@ -1,6 +1,45 @@
# Generated by Django 3.2.18 on 2023-05-04 07:06
from django.db import migrations
+from django.db.models.expressions import OuterRef, RawSQL
+
+
+def migrate_remaining_labels(apps, schema_editor):
+ """
+ This data migration handles the "but what about plugins" problem noted in the issue [0], with only two caveats:
+
+ Case 1: If there were to exist a plugin containing a Model whose model-name ended in (for example) "Repository",
+ that was NOT a detail-model of a Repository master-model, AND that plugin allowed Labels for such a model - then,
+ upon running this migration, those Labels would be lost.
+
+ Case 2: If there were to exist a plugin containing a Model that was a Detail of (for example) Repository,
+ but named something like "PluginRepositoryButWhy", and that plugin allowed Labels,
+ and instances of such a Model had Labels associated with them - then this migration would fail,
+ because the Labels would not be found, migrated, and deleted, and the old-Label table would not be able to be dropped.
+
+ And the plugins described above would have to have existed and been in use with pulpcore/3.21,only -
+ if they appeared with core/3.22, they'd be using new-Labels and all would be (already) well.
+
+ No such plugins/Models exist, to the best of our knowledge.
+
+ [0] https://github.com/pulp/pulpcore/issues/4319
+ """
+ Label = apps.get_model("core", "Label")
+ Repository = apps.get_model("core", "Repository")
+ Remote = apps.get_model("core", "Remote")
+ Distribution = apps.get_model("core", "Distribution")
+ ContentType = apps.get_model("contenttypes", "ContentType")
+
+ for master_model, model_name in [(Repository, "repository"), (Remote, "remote"), (Distribution, "distribution")]:
+ detail_ctypes = ContentType.objects.filter(app_label__ne="core", model__endswith=model_name)
+ affected_ids = Label.objects.filter(content_type__in=detail_ctypes).values("object_id").distinct()
+ label_subq = Label.objects.filter(
+ content_type__in=detail_ctypes, object_id=OuterRef("pulp_id")
+ ).annotate(
+ label_data=RawSQL("hstore(array_agg(key), array_agg(value))", [])
+ ).values("label_data")
+ master_model.objects.filter(pulp_id__in=affected_ids).update(pulp_labels=label_subq)
+ Label.objects.filter(content_type__in=detail_ctypes).delete()
def check_no_existing_labels(apps, schema_editor):
@@ -18,11 +57,20 @@
]
operations = [
+ migrations.RunPython(
+ code=migrate_remaining_labels,
+ reverse_code=migrations.RunPython.noop,
+ elidable=True,
+ ),
migrations.RunPython(
code=check_no_existing_labels,
reverse_code=migrations.RunPython.noop,
elidable=True,
),
+ migrations.RunSQL(
+ sql="SET CONSTRAINTS ALL IMMEDIATE;",
+ reverse_sql="",
+ ),
migrations.DeleteModel(
name='Label',
),
| {"golden_diff": "diff --git a/pulpcore/app/migrations/0104_delete_label.py b/pulpcore/app/migrations/0104_delete_label.py\n--- a/pulpcore/app/migrations/0104_delete_label.py\n+++ b/pulpcore/app/migrations/0104_delete_label.py\n@@ -1,6 +1,45 @@\n # Generated by Django 3.2.18 on 2023-05-04 07:06\n \n from django.db import migrations\n+from django.db.models.expressions import OuterRef, RawSQL\n+\n+\n+def migrate_remaining_labels(apps, schema_editor):\n+ \"\"\"\n+ This data migration handles the \"but what about plugins\" problem noted in the issue [0], with only two caveats:\n+\n+ Case 1: If there were to exist a plugin containing a Model whose model-name ended in (for example) \"Repository\",\n+ that was NOT a detail-model of a Repository master-model, AND that plugin allowed Labels for such a model - then,\n+ upon running this migration, those Labels would be lost.\n+\n+ Case 2: If there were to exist a plugin containing a Model that was a Detail of (for example) Repository,\n+ but named something like \"PluginRepositoryButWhy\", and that plugin allowed Labels,\n+ and instances of such a Model had Labels associated with them - then this migration would fail,\n+ because the Labels would not be found, migrated, and deleted, and the old-Label table would not be able to be dropped.\n+\n+ And the plugins described above would have to have existed and been in use with pulpcore/3.21,only -\n+ if they appeared with core/3.22, they'd be using new-Labels and all would be (already) well.\n+\n+ No such plugins/Models exist, to the best of our knowledge.\n+\n+ [0] https://github.com/pulp/pulpcore/issues/4319\n+ \"\"\"\n+ Label = apps.get_model(\"core\", \"Label\")\n+ Repository = apps.get_model(\"core\", \"Repository\")\n+ Remote = apps.get_model(\"core\", \"Remote\")\n+ Distribution = apps.get_model(\"core\", \"Distribution\")\n+ ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n+\n+ for master_model, model_name in [(Repository, \"repository\"), (Remote, \"remote\"), (Distribution, \"distribution\")]:\n+ detail_ctypes = ContentType.objects.filter(app_label__ne=\"core\", model__endswith=model_name)\n+ affected_ids = Label.objects.filter(content_type__in=detail_ctypes).values(\"object_id\").distinct()\n+ label_subq = Label.objects.filter(\n+ content_type__in=detail_ctypes, object_id=OuterRef(\"pulp_id\")\n+ ).annotate(\n+ label_data=RawSQL(\"hstore(array_agg(key), array_agg(value))\", [])\n+ ).values(\"label_data\")\n+ master_model.objects.filter(pulp_id__in=affected_ids).update(pulp_labels=label_subq)\n+ Label.objects.filter(content_type__in=detail_ctypes).delete()\n \n \n def check_no_existing_labels(apps, schema_editor):\n@@ -18,11 +57,20 @@\n ]\n \n operations = [\n+ migrations.RunPython(\n+ code=migrate_remaining_labels,\n+ reverse_code=migrations.RunPython.noop,\n+ elidable=True,\n+ ),\n migrations.RunPython(\n code=check_no_existing_labels,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n ),\n+ migrations.RunSQL(\n+ sql=\"SET CONSTRAINTS ALL IMMEDIATE;\",\n+ reverse_sql=\"\",\n+ ),\n migrations.DeleteModel(\n name='Label',\n ),\n", "issue": "Need a way to directly migrate pulp_labels from 3.21 to 3.28\n**Is your feature request related to a problem? Please describe.**\r\nAs RHUI developers, we are heavy users of pulp_labels on repository objects. As the current 4.5 release is on pulpcore 3.21, we would like our next release (4.6) to be based on pulpcore 3.28, and we need to ensure a smooth upgrade path for our customers. 
As it is currently laid out, the 3.21 to 3.28 migration path requires an intermediate step (<3.25) where `pulpcore-manager datarepair-labels` command is available.\r\n\r\n**Describe the solution you'd like**\r\nThe ideal solution would be to make Django migrations take care of that automatically, without requiring an intermediate step/release. Since we live in non-ideal world, a solution where an additional command like `datarepair-labels` is available would also suffice.\r\n\r\n**Describe alternatives you've considered**\r\nWe have considered an intermediate release of RHUI 4.6 with pulpcore 3.22 followed by RHUI 4.7 built on 3.28, but it would create a lot of migration issues for our customers, with many of them still running RHUI 4.0.\r\n\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.18 on 2023-05-04 07:06\n\nfrom django.db import migrations\n\n\ndef check_no_existing_labels(apps, schema_editor):\n Label = apps.get_model(\"core\", \"Label\")\n if Label.objects.exists():\n raise RuntimeError(\n \"There are remaining labels. Please revert to pulpcore<3.25 and make sure all labels are properly mirgated or deleted.\"\n )\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0103_alter_export_task'),\n ]\n\n operations = [\n migrations.RunPython(\n code=check_no_existing_labels,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n ),\n migrations.DeleteModel(\n name='Label',\n ),\n ]\n", "path": "pulpcore/app/migrations/0104_delete_label.py"}], "after_files": [{"content": "# Generated by Django 3.2.18 on 2023-05-04 07:06\n\nfrom django.db import migrations\nfrom django.db.models.expressions import OuterRef, RawSQL\n\n\ndef migrate_remaining_labels(apps, schema_editor):\n \"\"\"\n This data migration handles the \"but what about plugins\" problem noted in the issue [0], with only two caveats:\n\n Case 1: If there were to exist a plugin containing a Model whose model-name ended in (for example) \"Repository\",\n that was NOT a detail-model of a Repository master-model, AND that plugin allowed Labels for such a model - then,\n upon running this migration, those Labels would be lost.\n\n Case 2: If there were to exist a plugin containing a Model that was a Detail of (for example) Repository,\n but named something like \"PluginRepositoryButWhy\", and that plugin allowed Labels,\n and instances of such a Model had Labels associated with them - then this migration would fail,\n because the Labels would not be found, migrated, and deleted, and the old-Label table would not be able to be dropped.\n\n And the plugins described above would have to have existed and been in use with pulpcore/3.21,only -\n if they appeared with core/3.22, they'd be using new-Labels and all would be (already) well.\n\n No such plugins/Models exist, to the best of our knowledge.\n\n [0] https://github.com/pulp/pulpcore/issues/4319\n \"\"\"\n Label = apps.get_model(\"core\", \"Label\")\n Repository = apps.get_model(\"core\", \"Repository\")\n Remote = apps.get_model(\"core\", \"Remote\")\n Distribution = apps.get_model(\"core\", \"Distribution\")\n ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n\n for master_model, model_name in [(Repository, \"repository\"), (Remote, \"remote\"), (Distribution, \"distribution\")]:\n detail_ctypes = ContentType.objects.filter(app_label__ne=\"core\", model__endswith=model_name)\n affected_ids = Label.objects.filter(content_type__in=detail_ctypes).values(\"object_id\").distinct()\n label_subq = Label.objects.filter(\n 
content_type__in=detail_ctypes, object_id=OuterRef(\"pulp_id\")\n ).annotate(\n label_data=RawSQL(\"hstore(array_agg(key), array_agg(value))\", [])\n ).values(\"label_data\")\n master_model.objects.filter(pulp_id__in=affected_ids).update(pulp_labels=label_subq)\n Label.objects.filter(content_type__in=detail_ctypes).delete()\n\n\ndef check_no_existing_labels(apps, schema_editor):\n Label = apps.get_model(\"core\", \"Label\")\n if Label.objects.exists():\n raise RuntimeError(\n \"There are remaining labels. Please revert to pulpcore<3.25 and make sure all labels are properly mirgated or deleted.\"\n )\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0103_alter_export_task'),\n ]\n\n operations = [\n migrations.RunPython(\n code=migrate_remaining_labels,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n ),\n migrations.RunPython(\n code=check_no_existing_labels,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n ),\n migrations.RunSQL(\n sql=\"SET CONSTRAINTS ALL IMMEDIATE;\",\n reverse_sql=\"\",\n ),\n migrations.DeleteModel(\n name='Label',\n ),\n ]\n", "path": "pulpcore/app/migrations/0104_delete_label.py"}]} | 786 | 814 |
gh_patches_debug_11491 | rasdani/github-patches | git_diff | scikit-hep__pyhf-444 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pyhf commandline tools requires uproot extra
# Description
just issuing `pyhf --help` requires the uproot extra since `commandline.py` imports `writexml`.
# Expected Behavior
I don't need uproot if I don't want to use json2xml or xml2json
# Actual Behavior
I can't use `pyhf` without installing uproot.
# Steps to Reproduce
install master and run pyhf
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
--- END ISSUE ---
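The usual remedy for this kind of problem is to defer the optional-dependency import into the subcommand that needs it, so the CLI entry point itself stays importable without the extra installed. A rough sketch of the pattern with click (as pyhf uses); `writexml_stub` is a hypothetical stand-in for the real `pyhf.writexml` module that pulls in uproot, and the error wording is illustrative:

```python
import click


@click.group()
def cli():
    """Entry point: must stay importable even without optional extras."""


@cli.command()
def json2xml():
    # Defer the optional-dependency import into the command body, so that
    # `cli --help` never triggers it.
    try:
        import writexml_stub  # hypothetical heavy module needing uproot
    except ImportError as exc:
        raise click.ClickException(
            "json2xml needs the xmlio extra: pip install pyhf[xmlio]"
        ) from exc
    click.echo(writexml_stub.convert())


if __name__ == "__main__":
    cli()
```

With this layout, `--help` and the commands that do not need uproot keep working, and only `json2xml` reports the missing extra.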
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyhf/commandline.py`
Content:
```
1 import logging
2
3 import click
4 import json
5 import os
6
7 from . import writexml
8 from .utils import hypotest
9 from .pdf import Workspace
10 from .version import __version__
11
12 logging.basicConfig()
13 log = logging.getLogger(__name__)
14
15 # This is only needed for Python 2/3 compatibility
16 def ensure_dirs(path):
17 try:
18 os.makedirs(path, exist_ok=True)
19 except TypeError:
20 if not os.path.exists(path):
21 os.makedirs(path)
22
23
24 @click.group(context_settings=dict(help_option_names=['-h', '--help']))
25 @click.version_option(version=__version__)
26 def pyhf():
27 pass
28
29
30 @pyhf.command()
31 @click.argument('entrypoint-xml', type=click.Path(exists=True))
32 @click.option(
33 '--basedir',
34 help='The base directory for the XML files to point relative to.',
35 type=click.Path(exists=True),
36 default=os.getcwd(),
37 )
38 @click.option(
39 '--output-file',
40 help='The location of the output json file. If not specified, prints to screen.',
41 default=None,
42 )
43 @click.option('--track-progress/--hide-progress', default=True)
44 def xml2json(entrypoint_xml, basedir, output_file, track_progress):
45 """ Entrypoint XML: The top-level XML file for the PDF definition. """
46 try:
47 import uproot
48
49 assert uproot
50 except ImportError:
51 log.error(
52 "xml2json requires uproot, please install pyhf using the "
53 "xmlio extra: pip install pyhf[xmlio] or install uproot "
54 "manually: pip install uproot"
55 )
56 from . import readxml
57
58 spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)
59 if output_file is None:
60 print(json.dumps(spec, indent=4, sort_keys=True))
61 else:
62 with open(output_file, 'w+') as out_file:
63 json.dump(spec, out_file, indent=4, sort_keys=True)
64 log.debug("Written to {0:s}".format(output_file))
65
66
67 @pyhf.command()
68 @click.argument('workspace', default='-')
69 @click.option('--output-dir', type=click.Path(exists=True), default='.')
70 @click.option('--specroot', default='config')
71 @click.option('--dataroot', default='data')
72 @click.option('--resultprefix', default='FitConfig')
73 def json2xml(workspace, output_dir, specroot, dataroot, resultprefix):
74 try:
75 import uproot
76
77 assert uproot
78 except ImportError:
79 log.error(
80 "json2xml requires uproot, please install pyhf using the "
81 "xmlio extra: pip install pyhf[xmlio] or install uproot "
82 "manually: pip install uproot"
83 )
84
85 ensure_dirs(output_dir)
86 with click.open_file(workspace, 'r') as specstream:
87 d = json.load(specstream)
88 ensure_dirs(os.path.join(output_dir, specroot))
89 ensure_dirs(os.path.join(output_dir, dataroot))
90 with click.open_file(
91 os.path.join(output_dir, '{0:s}.xml'.format(resultprefix)), 'w'
92 ) as outstream:
93 outstream.write(
94 writexml.writexml(
95 d,
96 os.path.join(output_dir, specroot),
97 os.path.join(output_dir, dataroot),
98 resultprefix,
99 ).decode('utf-8')
100 )
101
102
103 @pyhf.command()
104 @click.argument('workspace', default='-')
105 @click.option(
106 '--output-file',
107 help='The location of the output json file. If not specified, prints to screen.',
108 default=None,
109 )
110 @click.option('--measurement', default=None)
111 @click.option('-p', '--patch', multiple=True)
112 @click.option('--testpoi', default=1.0)
113 def cls(workspace, output_file, measurement, patch, testpoi):
114 with click.open_file(workspace, 'r') as specstream:
115 wspec = json.load(specstream)
116
117 w = Workspace(wspec)
118
119 patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]
120 p = w.model(measurement_name=measurement, patches=patches)
121 result = hypotest(testpoi, w.data(p), p, return_expected_set=True)
122 result = {'CLs_obs': result[0].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}
123 if output_file is None:
124 print(json.dumps(result, indent=4, sort_keys=True))
125 else:
126 with open(output_file, 'w+') as out_file:
127 json.dump(result, out_file, indent=4, sort_keys=True)
128 log.debug("Written to {0:s}".format(output_file))
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyhf/commandline.py b/pyhf/commandline.py
--- a/pyhf/commandline.py
+++ b/pyhf/commandline.py
@@ -4,7 +4,6 @@
import json
import os
-from . import writexml
from .utils import hypotest
from .pdf import Workspace
from .version import __version__
@@ -81,6 +80,7 @@
"xmlio extra: pip install pyhf[xmlio] or install uproot "
"manually: pip install uproot"
)
+ from . import writexml
ensure_dirs(output_dir)
with click.open_file(workspace, 'r') as specstream:
| {"golden_diff": "diff --git a/pyhf/commandline.py b/pyhf/commandline.py\n--- a/pyhf/commandline.py\n+++ b/pyhf/commandline.py\n@@ -4,7 +4,6 @@\n import json\n import os\n \n-from . import writexml\n from .utils import hypotest\n from .pdf import Workspace\n from .version import __version__\n@@ -81,6 +80,7 @@\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n+ from . import writexml\n \n ensure_dirs(output_dir)\n with click.open_file(workspace, 'r') as specstream:\n", "issue": "pyhf commandline tools requires uproot extra\n# Description\r\n\r\njust issuing `pyhf --help` requires the uproot extra since `commandline.py` imports `writexml`.\r\n\r\n# Expected Behavior\r\n\r\nI don't need uproot if I don't want to use json2xml or xml2json\r\n\r\n# Actual Behavior\r\n\r\nI can't use `pyhf` without installing uproot.\r\n\r\n# Steps to Reproduce\r\n\r\ninstall master and run pyhf\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "import logging\n\nimport click\nimport json\nimport os\n\nfrom . import writexml\nfrom .utils import hypotest\nfrom .pdf import Workspace\nfrom .version import __version__\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n# This is only needed for Python 2/3 compatibility\ndef ensure_dirs(path):\n try:\n os.makedirs(path, exist_ok=True)\n except TypeError:\n if not os.path.exists(path):\n os.makedirs(path)\n\n\[email protected](context_settings=dict(help_option_names=['-h', '--help']))\[email protected]_option(version=__version__)\ndef pyhf():\n pass\n\n\[email protected]()\[email protected]('entrypoint-xml', type=click.Path(exists=True))\[email protected](\n '--basedir',\n help='The base directory for the XML files to point relative to.',\n type=click.Path(exists=True),\n default=os.getcwd(),\n)\[email protected](\n '--output-file',\n help='The location of the output json file. If not specified, prints to screen.',\n default=None,\n)\[email protected]('--track-progress/--hide-progress', default=True)\ndef xml2json(entrypoint_xml, basedir, output_file, track_progress):\n \"\"\" Entrypoint XML: The top-level XML file for the PDF definition. \"\"\"\n try:\n import uproot\n\n assert uproot\n except ImportError:\n log.error(\n \"xml2json requires uproot, please install pyhf using the \"\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n from . 
import readxml\n\n spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)\n if output_file is None:\n print(json.dumps(spec, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(spec, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected]('--output-dir', type=click.Path(exists=True), default='.')\[email protected]('--specroot', default='config')\[email protected]('--dataroot', default='data')\[email protected]('--resultprefix', default='FitConfig')\ndef json2xml(workspace, output_dir, specroot, dataroot, resultprefix):\n try:\n import uproot\n\n assert uproot\n except ImportError:\n log.error(\n \"json2xml requires uproot, please install pyhf using the \"\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n\n ensure_dirs(output_dir)\n with click.open_file(workspace, 'r') as specstream:\n d = json.load(specstream)\n ensure_dirs(os.path.join(output_dir, specroot))\n ensure_dirs(os.path.join(output_dir, dataroot))\n with click.open_file(\n os.path.join(output_dir, '{0:s}.xml'.format(resultprefix)), 'w'\n ) as outstream:\n outstream.write(\n writexml.writexml(\n d,\n os.path.join(output_dir, specroot),\n os.path.join(output_dir, dataroot),\n resultprefix,\n ).decode('utf-8')\n )\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected](\n '--output-file',\n help='The location of the output json file. If not specified, prints to screen.',\n default=None,\n)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--testpoi', default=1.0)\ndef cls(workspace, output_file, measurement, patch, testpoi):\n with click.open_file(workspace, 'r') as specstream:\n wspec = json.load(specstream)\n\n w = Workspace(wspec)\n\n patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]\n p = w.model(measurement_name=measurement, patches=patches)\n result = hypotest(testpoi, w.data(p), p, return_expected_set=True)\n result = {'CLs_obs': result[0].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}\n if output_file is None:\n print(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n", "path": "pyhf/commandline.py"}], "after_files": [{"content": "import logging\n\nimport click\nimport json\nimport os\n\nfrom .utils import hypotest\nfrom .pdf import Workspace\nfrom .version import __version__\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n# This is only needed for Python 2/3 compatibility\ndef ensure_dirs(path):\n try:\n os.makedirs(path, exist_ok=True)\n except TypeError:\n if not os.path.exists(path):\n os.makedirs(path)\n\n\[email protected](context_settings=dict(help_option_names=['-h', '--help']))\[email protected]_option(version=__version__)\ndef pyhf():\n pass\n\n\[email protected]()\[email protected]('entrypoint-xml', type=click.Path(exists=True))\[email protected](\n '--basedir',\n help='The base directory for the XML files to point relative to.',\n type=click.Path(exists=True),\n default=os.getcwd(),\n)\[email protected](\n '--output-file',\n help='The location of the output json file. 
If not specified, prints to screen.',\n default=None,\n)\[email protected]('--track-progress/--hide-progress', default=True)\ndef xml2json(entrypoint_xml, basedir, output_file, track_progress):\n \"\"\" Entrypoint XML: The top-level XML file for the PDF definition. \"\"\"\n try:\n import uproot\n\n assert uproot\n except ImportError:\n log.error(\n \"xml2json requires uproot, please install pyhf using the \"\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n from . import readxml\n\n spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)\n if output_file is None:\n print(json.dumps(spec, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(spec, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected]('--output-dir', type=click.Path(exists=True), default='.')\[email protected]('--specroot', default='config')\[email protected]('--dataroot', default='data')\[email protected]('--resultprefix', default='FitConfig')\ndef json2xml(workspace, output_dir, specroot, dataroot, resultprefix):\n try:\n import uproot\n\n assert uproot\n except ImportError:\n log.error(\n \"json2xml requires uproot, please install pyhf using the \"\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n from . import writexml\n\n ensure_dirs(output_dir)\n with click.open_file(workspace, 'r') as specstream:\n d = json.load(specstream)\n ensure_dirs(os.path.join(output_dir, specroot))\n ensure_dirs(os.path.join(output_dir, dataroot))\n with click.open_file(\n os.path.join(output_dir, '{0:s}.xml'.format(resultprefix)), 'w'\n ) as outstream:\n outstream.write(\n writexml.writexml(\n d,\n os.path.join(output_dir, specroot),\n os.path.join(output_dir, dataroot),\n resultprefix,\n ).decode('utf-8')\n )\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected](\n '--output-file',\n help='The location of the output json file. If not specified, prints to screen.',\n default=None,\n)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--testpoi', default=1.0)\ndef cls(workspace, output_file, measurement, patch, testpoi):\n with click.open_file(workspace, 'r') as specstream:\n wspec = json.load(specstream)\n\n w = Workspace(wspec)\n\n patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]\n p = w.model(measurement_name=measurement, patches=patches)\n result = hypotest(testpoi, w.data(p), p, return_expected_set=True)\n result = {'CLs_obs': result[0].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}\n if output_file is None:\n print(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n", "path": "pyhf/commandline.py"}]} | 1,734 | 148 |
gh_patches_debug_41840 | rasdani/github-patches | git_diff | encode__starlette-1360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mypy complains about `exception_handlers` dict with `HTTPException` key
### Checklist
<!-- Please make sure you check all these items before submitting your bug report. -->
- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.
Related, but not the same:
* #1034
* #1139
* #949
### Describe the bug
<!-- A clear and concise description of what the bug is. -->
### To reproduce
Given an example.py:
```python
from starlette.applications import Starlette
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Route
def handle_http_exception(request: Request, exc: HTTPException) -> Response:
return Response('foo')
exception_handlers = {
HTTPException: handle_http_exception
}
app = Starlette(exception_handlers=exception_handlers)
```
### Expected behavior
Mypy does not present an error.
### Actual behavior
```console
$ mypy example.py
example.py:14: error: Argument "exception_handlers" to "Starlette" has incompatible type "Dict[Type[HTTPException], Callable[[Request, HTTPException], Response]]"; expected "Optional[Dict[Union[int, Type[Exception]], Callable[..., Any]]]"
Found 1 error in 1 file (checked 1 source file)
```
### Debugging material
None
### Environment
- OS: Debian (`Linux ... 4.19.0-14-amd64 #1 SMP Debian 4.19.171-2 (2021-01-30) x86_64 GNU/Linux`)
- Python version: `Python 3.9.1`
- Starlette version: `starlette==0.14.2`
### Additional context
For propriety:
https://github.com/encode/starlette/blob/master/docs/exceptions.md#L42
https://github.com/encode/starlette/blob/ed73b97c9b8f4aa527eed88032c04ce9fe109a3c/starlette/applications.py#L44
I think this is a Starlette issue and not a mypy issue; apologies if I'm wrong here.
--- END ISSUE ---
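Until the library-side annotation is loosened, a user-side workaround is to annotate the dict with the parameter type Starlette declares, so mypy checks the entries against that wider type instead of inferring the narrower `Dict[Type[HTTPException], ...]` key type. A minimal sketch, assuming the 0.14.x signature shown in the error message:

```python
import typing

from starlette.applications import Starlette
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import Response


def handle_http_exception(request: Request, exc: HTTPException) -> Response:
    return Response("foo")


# Declaring the wider key type up front avoids the invariance error:
# Type[HTTPException] is accepted where Type[Exception] is expected.
exception_handlers: typing.Dict[
    typing.Union[int, typing.Type[Exception]], typing.Callable
] = {HTTPException: handle_http_exception}

app = Starlette(exception_handlers=exception_handlers)
```

An equivalent alternative is `typing.cast` to the same mapping type at the `Starlette(...)` call site.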
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/exceptions.py`
Content:
```
1 import asyncio
2 import http
3 import typing
4
5 from starlette.concurrency import run_in_threadpool
6 from starlette.requests import Request
7 from starlette.responses import PlainTextResponse, Response
8 from starlette.types import ASGIApp, Message, Receive, Scope, Send
9
10
11 class HTTPException(Exception):
12 def __init__(self, status_code: int, detail: str = None) -> None:
13 if detail is None:
14 detail = http.HTTPStatus(status_code).phrase
15 self.status_code = status_code
16 self.detail = detail
17
18 def __repr__(self) -> str:
19 class_name = self.__class__.__name__
20 return f"{class_name}(status_code={self.status_code!r}, detail={self.detail!r})"
21
22
23 class ExceptionMiddleware:
24 def __init__(
25 self, app: ASGIApp, handlers: dict = None, debug: bool = False
26 ) -> None:
27 self.app = app
28 self.debug = debug # TODO: We ought to handle 404 cases if debug is set.
29 self._status_handlers: typing.Dict[int, typing.Callable] = {}
30 self._exception_handlers: typing.Dict[
31 typing.Type[Exception], typing.Callable
32 ] = {HTTPException: self.http_exception}
33 if handlers is not None:
34 for key, value in handlers.items():
35 self.add_exception_handler(key, value)
36
37 def add_exception_handler(
38 self,
39 exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],
40 handler: typing.Callable,
41 ) -> None:
42 if isinstance(exc_class_or_status_code, int):
43 self._status_handlers[exc_class_or_status_code] = handler
44 else:
45 assert issubclass(exc_class_or_status_code, Exception)
46 self._exception_handlers[exc_class_or_status_code] = handler
47
48 def _lookup_exception_handler(
49 self, exc: Exception
50 ) -> typing.Optional[typing.Callable]:
51 for cls in type(exc).__mro__:
52 if cls in self._exception_handlers:
53 return self._exception_handlers[cls]
54 return None
55
56 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
57 if scope["type"] != "http":
58 await self.app(scope, receive, send)
59 return
60
61 response_started = False
62
63 async def sender(message: Message) -> None:
64 nonlocal response_started
65
66 if message["type"] == "http.response.start":
67 response_started = True
68 await send(message)
69
70 try:
71 await self.app(scope, receive, sender)
72 except Exception as exc:
73 handler = None
74
75 if isinstance(exc, HTTPException):
76 handler = self._status_handlers.get(exc.status_code)
77
78 if handler is None:
79 handler = self._lookup_exception_handler(exc)
80
81 if handler is None:
82 raise exc
83
84 if response_started:
85 msg = "Caught handled exception, but response already started."
86 raise RuntimeError(msg) from exc
87
88 request = Request(scope, receive=receive)
89 if asyncio.iscoroutinefunction(handler):
90 response = await handler(request, exc)
91 else:
92 response = await run_in_threadpool(handler, request, exc)
93 await response(scope, receive, sender)
94
95 def http_exception(self, request: Request, exc: HTTPException) -> Response:
96 if exc.status_code in {204, 304}:
97 return Response(b"", status_code=exc.status_code)
98 return PlainTextResponse(exc.detail, status_code=exc.status_code)
99
```
Path: `starlette/applications.py`
Content:
```
1 import typing
2
3 from starlette.datastructures import State, URLPath
4 from starlette.exceptions import ExceptionMiddleware
5 from starlette.middleware import Middleware
6 from starlette.middleware.base import BaseHTTPMiddleware
7 from starlette.middleware.errors import ServerErrorMiddleware
8 from starlette.routing import BaseRoute, Router
9 from starlette.types import ASGIApp, Receive, Scope, Send
10
11
12 class Starlette:
13 """
14 Creates an application instance.
15
16 **Parameters:**
17
18 * **debug** - Boolean indicating if debug tracebacks should be returned on errors.
19 * **routes** - A list of routes to serve incoming HTTP and WebSocket requests.
20 * **middleware** - A list of middleware to run for every request. A starlette
21 application will always automatically include two middleware classes.
22 `ServerErrorMiddleware` is added as the very outermost middleware, to handle
23 any uncaught errors occurring anywhere in the entire stack.
24 `ExceptionMiddleware` is added as the very innermost middleware, to deal
25 with handled exception cases occurring in the routing or endpoints.
26 * **exception_handlers** - A dictionary mapping either integer status codes,
27 or exception class types onto callables which handle the exceptions.
28 Exception handler callables should be of the form
29 `handler(request, exc) -> response` and may be be either standard functions, or
30 async functions.
31 * **on_startup** - A list of callables to run on application startup.
32 Startup handler callables do not take any arguments, and may be be either
33 standard functions, or async functions.
34 * **on_shutdown** - A list of callables to run on application shutdown.
35 Shutdown handler callables do not take any arguments, and may be be either
36 standard functions, or async functions.
37 """
38
39 def __init__(
40 self,
41 debug: bool = False,
42 routes: typing.Sequence[BaseRoute] = None,
43 middleware: typing.Sequence[Middleware] = None,
44 exception_handlers: typing.Dict[
45 typing.Union[int, typing.Type[Exception]], typing.Callable
46 ] = None,
47 on_startup: typing.Sequence[typing.Callable] = None,
48 on_shutdown: typing.Sequence[typing.Callable] = None,
49 lifespan: typing.Callable[["Starlette"], typing.AsyncContextManager] = None,
50 ) -> None:
51 # The lifespan context function is a newer style that replaces
52 # on_startup / on_shutdown handlers. Use one or the other, not both.
53 assert lifespan is None or (
54 on_startup is None and on_shutdown is None
55 ), "Use either 'lifespan' or 'on_startup'/'on_shutdown', not both."
56
57 self._debug = debug
58 self.state = State()
59 self.router = Router(
60 routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan
61 )
62 self.exception_handlers = (
63 {} if exception_handlers is None else dict(exception_handlers)
64 )
65 self.user_middleware = [] if middleware is None else list(middleware)
66 self.middleware_stack = self.build_middleware_stack()
67
68 def build_middleware_stack(self) -> ASGIApp:
69 debug = self.debug
70 error_handler = None
71 exception_handlers = {}
72
73 for key, value in self.exception_handlers.items():
74 if key in (500, Exception):
75 error_handler = value
76 else:
77 exception_handlers[key] = value
78
79 middleware = (
80 [Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)]
81 + self.user_middleware
82 + [
83 Middleware(
84 ExceptionMiddleware, handlers=exception_handlers, debug=debug
85 )
86 ]
87 )
88
89 app = self.router
90 for cls, options in reversed(middleware):
91 app = cls(app=app, **options)
92 return app
93
94 @property
95 def routes(self) -> typing.List[BaseRoute]:
96 return self.router.routes
97
98 @property
99 def debug(self) -> bool:
100 return self._debug
101
102 @debug.setter
103 def debug(self, value: bool) -> None:
104 self._debug = value
105 self.middleware_stack = self.build_middleware_stack()
106
107 def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:
108 return self.router.url_path_for(name, **path_params)
109
110 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
111 scope["app"] = self
112 await self.middleware_stack(scope, receive, send)
113
114 # The following usages are now discouraged in favour of configuration
115 # during Starlette.__init__(...)
116 def on_event(self, event_type: str) -> typing.Callable:
117 return self.router.on_event(event_type)
118
119 def mount(self, path: str, app: ASGIApp, name: str = None) -> None:
120 self.router.mount(path, app=app, name=name)
121
122 def host(self, host: str, app: ASGIApp, name: str = None) -> None:
123 self.router.host(host, app=app, name=name)
124
125 def add_middleware(self, middleware_class: type, **options: typing.Any) -> None:
126 self.user_middleware.insert(0, Middleware(middleware_class, **options))
127 self.middleware_stack = self.build_middleware_stack()
128
129 def add_exception_handler(
130 self,
131 exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],
132 handler: typing.Callable,
133 ) -> None:
134 self.exception_handlers[exc_class_or_status_code] = handler
135 self.middleware_stack = self.build_middleware_stack()
136
137 def add_event_handler(self, event_type: str, func: typing.Callable) -> None:
138 self.router.add_event_handler(event_type, func)
139
140 def add_route(
141 self,
142 path: str,
143 route: typing.Callable,
144 methods: typing.List[str] = None,
145 name: str = None,
146 include_in_schema: bool = True,
147 ) -> None:
148 self.router.add_route(
149 path, route, methods=methods, name=name, include_in_schema=include_in_schema
150 )
151
152 def add_websocket_route(
153 self, path: str, route: typing.Callable, name: str = None
154 ) -> None:
155 self.router.add_websocket_route(path, route, name=name)
156
157 def exception_handler(
158 self, exc_class_or_status_code: typing.Union[int, typing.Type[Exception]]
159 ) -> typing.Callable:
160 def decorator(func: typing.Callable) -> typing.Callable:
161 self.add_exception_handler(exc_class_or_status_code, func)
162 return func
163
164 return decorator
165
166 def route(
167 self,
168 path: str,
169 methods: typing.List[str] = None,
170 name: str = None,
171 include_in_schema: bool = True,
172 ) -> typing.Callable:
173 def decorator(func: typing.Callable) -> typing.Callable:
174 self.router.add_route(
175 path,
176 func,
177 methods=methods,
178 name=name,
179 include_in_schema=include_in_schema,
180 )
181 return func
182
183 return decorator
184
185 def websocket_route(self, path: str, name: str = None) -> typing.Callable:
186 def decorator(func: typing.Callable) -> typing.Callable:
187 self.router.add_websocket_route(path, func, name=name)
188 return func
189
190 return decorator
191
192 def middleware(self, middleware_type: str) -> typing.Callable:
193 assert (
194 middleware_type == "http"
195 ), 'Currently only middleware("http") is supported.'
196
197 def decorator(func: typing.Callable) -> typing.Callable:
198 self.add_middleware(BaseHTTPMiddleware, dispatch=func)
199 return func
200
201 return decorator
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/applications.py b/starlette/applications.py
--- a/starlette/applications.py
+++ b/starlette/applications.py
@@ -5,6 +5,8 @@
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.middleware.errors import ServerErrorMiddleware
+from starlette.requests import Request
+from starlette.responses import Response
from starlette.routing import BaseRoute, Router
from starlette.types import ASGIApp, Receive, Scope, Send
@@ -23,7 +25,7 @@
any uncaught errors occurring anywhere in the entire stack.
`ExceptionMiddleware` is added as the very innermost middleware, to deal
with handled exception cases occurring in the routing or endpoints.
- * **exception_handlers** - A dictionary mapping either integer status codes,
+ * **exception_handlers** - A mapping of either integer status codes,
or exception class types onto callables which handle the exceptions.
Exception handler callables should be of the form
`handler(request, exc) -> response` and may be be either standard functions, or
@@ -41,8 +43,8 @@
debug: bool = False,
routes: typing.Sequence[BaseRoute] = None,
middleware: typing.Sequence[Middleware] = None,
- exception_handlers: typing.Dict[
- typing.Union[int, typing.Type[Exception]], typing.Callable
+ exception_handlers: typing.Mapping[
+ typing.Any, typing.Callable[[Request, Exception], Response]
] = None,
on_startup: typing.Sequence[typing.Callable] = None,
on_shutdown: typing.Sequence[typing.Callable] = None,
@@ -68,7 +70,9 @@
def build_middleware_stack(self) -> ASGIApp:
debug = self.debug
error_handler = None
- exception_handlers = {}
+ exception_handlers: typing.Dict[
+ typing.Any, typing.Callable[[Request, Exception], Response]
+ ] = {}
for key, value in self.exception_handlers.items():
if key in (500, Exception):
diff --git a/starlette/exceptions.py b/starlette/exceptions.py
--- a/starlette/exceptions.py
+++ b/starlette/exceptions.py
@@ -22,7 +22,12 @@
class ExceptionMiddleware:
def __init__(
- self, app: ASGIApp, handlers: dict = None, debug: bool = False
+ self,
+ app: ASGIApp,
+ handlers: typing.Mapping[
+ typing.Any, typing.Callable[[Request, Exception], Response]
+ ] = None,
+ debug: bool = False,
) -> None:
self.app = app
self.debug = debug # TODO: We ought to handle 404 cases if debug is set.
@@ -37,7 +42,7 @@
def add_exception_handler(
self,
exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],
- handler: typing.Callable,
+ handler: typing.Callable[[Request, Exception], Response],
) -> None:
if isinstance(exc_class_or_status_code, int):
self._status_handlers[exc_class_or_status_code] = handler
| {"golden_diff": "diff --git a/starlette/applications.py b/starlette/applications.py\n--- a/starlette/applications.py\n+++ b/starlette/applications.py\n@@ -5,6 +5,8 @@\n from starlette.middleware import Middleware\n from starlette.middleware.base import BaseHTTPMiddleware\n from starlette.middleware.errors import ServerErrorMiddleware\n+from starlette.requests import Request\n+from starlette.responses import Response\n from starlette.routing import BaseRoute, Router\n from starlette.types import ASGIApp, Receive, Scope, Send\n \n@@ -23,7 +25,7 @@\n any uncaught errors occurring anywhere in the entire stack.\n `ExceptionMiddleware` is added as the very innermost middleware, to deal\n with handled exception cases occurring in the routing or endpoints.\n- * **exception_handlers** - A dictionary mapping either integer status codes,\n+ * **exception_handlers** - A mapping of either integer status codes,\n or exception class types onto callables which handle the exceptions.\n Exception handler callables should be of the form\n `handler(request, exc) -> response` and may be be either standard functions, or\n@@ -41,8 +43,8 @@\n debug: bool = False,\n routes: typing.Sequence[BaseRoute] = None,\n middleware: typing.Sequence[Middleware] = None,\n- exception_handlers: typing.Dict[\n- typing.Union[int, typing.Type[Exception]], typing.Callable\n+ exception_handlers: typing.Mapping[\n+ typing.Any, typing.Callable[[Request, Exception], Response]\n ] = None,\n on_startup: typing.Sequence[typing.Callable] = None,\n on_shutdown: typing.Sequence[typing.Callable] = None,\n@@ -68,7 +70,9 @@\n def build_middleware_stack(self) -> ASGIApp:\n debug = self.debug\n error_handler = None\n- exception_handlers = {}\n+ exception_handlers: typing.Dict[\n+ typing.Any, typing.Callable[[Request, Exception], Response]\n+ ] = {}\n \n for key, value in self.exception_handlers.items():\n if key in (500, Exception):\ndiff --git a/starlette/exceptions.py b/starlette/exceptions.py\n--- a/starlette/exceptions.py\n+++ b/starlette/exceptions.py\n@@ -22,7 +22,12 @@\n \n class ExceptionMiddleware:\n def __init__(\n- self, app: ASGIApp, handlers: dict = None, debug: bool = False\n+ self,\n+ app: ASGIApp,\n+ handlers: typing.Mapping[\n+ typing.Any, typing.Callable[[Request, Exception], Response]\n+ ] = None,\n+ debug: bool = False,\n ) -> None:\n self.app = app\n self.debug = debug # TODO: We ought to handle 404 cases if debug is set.\n@@ -37,7 +42,7 @@\n def add_exception_handler(\n self,\n exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],\n- handler: typing.Callable,\n+ handler: typing.Callable[[Request, Exception], Response],\n ) -> None:\n if isinstance(exc_class_or_status_code, int):\n self._status_handlers[exc_class_or_status_code] = handler\n", "issue": "mypy complains about `exception_handlers` dict with `HTTPException` key\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. -->\r\n\r\n- [x] The bug is reproducible against the latest release and/or `master`.\r\n- [x] There are no similar issues or pull requests to fix it yet.\r\n\r\nRelated, but not the same:\r\n* #1034 \r\n* #1139 \r\n* #949\r\n\r\n### Describe the bug\r\n\r\n<!-- A clear and concise description of what the bug is. 
-->\r\n\r\n### To reproduce\r\n\r\nGiven an example.py: \r\n\r\n```python\r\nfrom starlette.applications import Starlette\r\nfrom starlette.exceptions import HTTPException\r\nfrom starlette.requests import Request\r\nfrom starlette.responses import Response\r\nfrom starlette.routing import Route\r\n\r\ndef handle_http_exception(request: Request, exc: HTTPException) -> Response:\r\n return Response('foo')\r\n\r\n\r\nexception_handlers = {\r\n HTTPException: handle_http_exception\r\n}\r\napp = Starlette(exception_handlers=exception_handlers)\r\n```\r\n\r\n### Expected behavior\r\n\r\nMypy does not present an error.\r\n\r\n### Actual behavior\r\n\r\n```console\r\n$ mypy example.py\r\nexample.py:14: error: Argument \"exception_handlers\" to \"Starlette\" has incompatible type \"Dict[Type[HTTPException], Callable[[Request, HTTPException], Response]]\"; expected \"Optional[Dict[Union[int, Type[Exception]], Callable[..., Any]]]\"\r\nFound 1 error in 1 file (checked 1 source file)\r\n```\r\n\r\n### Debugging material\r\n\r\nNone\r\n\r\n### Environment\r\n\r\n- OS: Debian (`Linux ... 4.19.0-14-amd64 #1 SMP Debian 4.19.171-2 (2021-01-30) x86_64 GNU/Linux`)\r\n- Python version: `Python 3.9.1`\r\n- Starlette version: `starlette==0.14.2`\r\n\r\n### Additional context\r\n\r\nFor propriety:\r\n\r\nhttps://github.com/encode/starlette/blob/master/docs/exceptions.md#L42\r\n\r\nhttps://github.com/encode/starlette/blob/ed73b97c9b8f4aa527eed88032c04ce9fe109a3c/starlette/applications.py#L44\r\n\r\nI think this is a Starlette issue and not a mypy issue; apologies if I'm wrong here.\n", "before_files": [{"content": "import asyncio\nimport http\nimport typing\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.requests import Request\nfrom starlette.responses import PlainTextResponse, Response\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\n\nclass HTTPException(Exception):\n def __init__(self, status_code: int, detail: str = None) -> None:\n if detail is None:\n detail = http.HTTPStatus(status_code).phrase\n self.status_code = status_code\n self.detail = detail\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f\"{class_name}(status_code={self.status_code!r}, detail={self.detail!r})\"\n\n\nclass ExceptionMiddleware:\n def __init__(\n self, app: ASGIApp, handlers: dict = None, debug: bool = False\n ) -> None:\n self.app = app\n self.debug = debug # TODO: We ought to handle 404 cases if debug is set.\n self._status_handlers: typing.Dict[int, typing.Callable] = {}\n self._exception_handlers: typing.Dict[\n typing.Type[Exception], typing.Callable\n ] = {HTTPException: self.http_exception}\n if handlers is not None:\n for key, value in handlers.items():\n self.add_exception_handler(key, value)\n\n def add_exception_handler(\n self,\n exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],\n handler: typing.Callable,\n ) -> None:\n if isinstance(exc_class_or_status_code, int):\n self._status_handlers[exc_class_or_status_code] = handler\n else:\n assert issubclass(exc_class_or_status_code, Exception)\n self._exception_handlers[exc_class_or_status_code] = handler\n\n def _lookup_exception_handler(\n self, exc: Exception\n ) -> typing.Optional[typing.Callable]:\n for cls in type(exc).__mro__:\n if cls in self._exception_handlers:\n return self._exception_handlers[cls]\n return None\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n 
return\n\n response_started = False\n\n async def sender(message: Message) -> None:\n nonlocal response_started\n\n if message[\"type\"] == \"http.response.start\":\n response_started = True\n await send(message)\n\n try:\n await self.app(scope, receive, sender)\n except Exception as exc:\n handler = None\n\n if isinstance(exc, HTTPException):\n handler = self._status_handlers.get(exc.status_code)\n\n if handler is None:\n handler = self._lookup_exception_handler(exc)\n\n if handler is None:\n raise exc\n\n if response_started:\n msg = \"Caught handled exception, but response already started.\"\n raise RuntimeError(msg) from exc\n\n request = Request(scope, receive=receive)\n if asyncio.iscoroutinefunction(handler):\n response = await handler(request, exc)\n else:\n response = await run_in_threadpool(handler, request, exc)\n await response(scope, receive, sender)\n\n def http_exception(self, request: Request, exc: HTTPException) -> Response:\n if exc.status_code in {204, 304}:\n return Response(b\"\", status_code=exc.status_code)\n return PlainTextResponse(exc.detail, status_code=exc.status_code)\n", "path": "starlette/exceptions.py"}, {"content": "import typing\n\nfrom starlette.datastructures import State, URLPath\nfrom starlette.exceptions import ExceptionMiddleware\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.base import BaseHTTPMiddleware\nfrom starlette.middleware.errors import ServerErrorMiddleware\nfrom starlette.routing import BaseRoute, Router\nfrom starlette.types import ASGIApp, Receive, Scope, Send\n\n\nclass Starlette:\n \"\"\"\n Creates an application instance.\n\n **Parameters:**\n\n * **debug** - Boolean indicating if debug tracebacks should be returned on errors.\n * **routes** - A list of routes to serve incoming HTTP and WebSocket requests.\n * **middleware** - A list of middleware to run for every request. A starlette\n application will always automatically include two middleware classes.\n `ServerErrorMiddleware` is added as the very outermost middleware, to handle\n any uncaught errors occurring anywhere in the entire stack.\n `ExceptionMiddleware` is added as the very innermost middleware, to deal\n with handled exception cases occurring in the routing or endpoints.\n * **exception_handlers** - A dictionary mapping either integer status codes,\n or exception class types onto callables which handle the exceptions.\n Exception handler callables should be of the form\n `handler(request, exc) -> response` and may be be either standard functions, or\n async functions.\n * **on_startup** - A list of callables to run on application startup.\n Startup handler callables do not take any arguments, and may be be either\n standard functions, or async functions.\n * **on_shutdown** - A list of callables to run on application shutdown.\n Shutdown handler callables do not take any arguments, and may be be either\n standard functions, or async functions.\n \"\"\"\n\n def __init__(\n self,\n debug: bool = False,\n routes: typing.Sequence[BaseRoute] = None,\n middleware: typing.Sequence[Middleware] = None,\n exception_handlers: typing.Dict[\n typing.Union[int, typing.Type[Exception]], typing.Callable\n ] = None,\n on_startup: typing.Sequence[typing.Callable] = None,\n on_shutdown: typing.Sequence[typing.Callable] = None,\n lifespan: typing.Callable[[\"Starlette\"], typing.AsyncContextManager] = None,\n ) -> None:\n # The lifespan context function is a newer style that replaces\n # on_startup / on_shutdown handlers. 
Use one or the other, not both.\n assert lifespan is None or (\n on_startup is None and on_shutdown is None\n ), \"Use either 'lifespan' or 'on_startup'/'on_shutdown', not both.\"\n\n self._debug = debug\n self.state = State()\n self.router = Router(\n routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan\n )\n self.exception_handlers = (\n {} if exception_handlers is None else dict(exception_handlers)\n )\n self.user_middleware = [] if middleware is None else list(middleware)\n self.middleware_stack = self.build_middleware_stack()\n\n def build_middleware_stack(self) -> ASGIApp:\n debug = self.debug\n error_handler = None\n exception_handlers = {}\n\n for key, value in self.exception_handlers.items():\n if key in (500, Exception):\n error_handler = value\n else:\n exception_handlers[key] = value\n\n middleware = (\n [Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)]\n + self.user_middleware\n + [\n Middleware(\n ExceptionMiddleware, handlers=exception_handlers, debug=debug\n )\n ]\n )\n\n app = self.router\n for cls, options in reversed(middleware):\n app = cls(app=app, **options)\n return app\n\n @property\n def routes(self) -> typing.List[BaseRoute]:\n return self.router.routes\n\n @property\n def debug(self) -> bool:\n return self._debug\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self._debug = value\n self.middleware_stack = self.build_middleware_stack()\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n return self.router.url_path_for(name, **path_params)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n scope[\"app\"] = self\n await self.middleware_stack(scope, receive, send)\n\n # The following usages are now discouraged in favour of configuration\n # \u00a0during Starlette.__init__(...)\n def on_event(self, event_type: str) -> typing.Callable:\n return self.router.on_event(event_type)\n\n def mount(self, path: str, app: ASGIApp, name: str = None) -> None:\n self.router.mount(path, app=app, name=name)\n\n def host(self, host: str, app: ASGIApp, name: str = None) -> None:\n self.router.host(host, app=app, name=name)\n\n def add_middleware(self, middleware_class: type, **options: typing.Any) -> None:\n self.user_middleware.insert(0, Middleware(middleware_class, **options))\n self.middleware_stack = self.build_middleware_stack()\n\n def add_exception_handler(\n self,\n exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],\n handler: typing.Callable,\n ) -> None:\n self.exception_handlers[exc_class_or_status_code] = handler\n self.middleware_stack = self.build_middleware_stack()\n\n def add_event_handler(self, event_type: str, func: typing.Callable) -> None:\n self.router.add_event_handler(event_type, func)\n\n def add_route(\n self,\n path: str,\n route: typing.Callable,\n methods: typing.List[str] = None,\n name: str = None,\n include_in_schema: bool = True,\n ) -> None:\n self.router.add_route(\n path, route, methods=methods, name=name, include_in_schema=include_in_schema\n )\n\n def add_websocket_route(\n self, path: str, route: typing.Callable, name: str = None\n ) -> None:\n self.router.add_websocket_route(path, route, name=name)\n\n def exception_handler(\n self, exc_class_or_status_code: typing.Union[int, typing.Type[Exception]]\n ) -> typing.Callable:\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_exception_handler(exc_class_or_status_code, func)\n return func\n\n return decorator\n\n def route(\n self,\n path: str,\n 
methods: typing.List[str] = None,\n name: str = None,\n include_in_schema: bool = True,\n ) -> typing.Callable:\n def decorator(func: typing.Callable) -> typing.Callable:\n self.router.add_route(\n path,\n func,\n methods=methods,\n name=name,\n include_in_schema=include_in_schema,\n )\n return func\n\n return decorator\n\n def websocket_route(self, path: str, name: str = None) -> typing.Callable:\n def decorator(func: typing.Callable) -> typing.Callable:\n self.router.add_websocket_route(path, func, name=name)\n return func\n\n return decorator\n\n def middleware(self, middleware_type: str) -> typing.Callable:\n assert (\n middleware_type == \"http\"\n ), 'Currently only middleware(\"http\") is supported.'\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_middleware(BaseHTTPMiddleware, dispatch=func)\n return func\n\n return decorator\n", "path": "starlette/applications.py"}], "after_files": [{"content": "import asyncio\nimport http\nimport typing\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.requests import Request\nfrom starlette.responses import PlainTextResponse, Response\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\n\nclass HTTPException(Exception):\n def __init__(self, status_code: int, detail: str = None) -> None:\n if detail is None:\n detail = http.HTTPStatus(status_code).phrase\n self.status_code = status_code\n self.detail = detail\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f\"{class_name}(status_code={self.status_code!r}, detail={self.detail!r})\"\n\n\nclass ExceptionMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n handlers: typing.Mapping[\n typing.Any, typing.Callable[[Request, Exception], Response]\n ] = None,\n debug: bool = False,\n ) -> None:\n self.app = app\n self.debug = debug # TODO: We ought to handle 404 cases if debug is set.\n self._status_handlers: typing.Dict[int, typing.Callable] = {}\n self._exception_handlers: typing.Dict[\n typing.Type[Exception], typing.Callable\n ] = {HTTPException: self.http_exception}\n if handlers is not None:\n for key, value in handlers.items():\n self.add_exception_handler(key, value)\n\n def add_exception_handler(\n self,\n exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],\n handler: typing.Callable[[Request, Exception], Response],\n ) -> None:\n if isinstance(exc_class_or_status_code, int):\n self._status_handlers[exc_class_or_status_code] = handler\n else:\n assert issubclass(exc_class_or_status_code, Exception)\n self._exception_handlers[exc_class_or_status_code] = handler\n\n def _lookup_exception_handler(\n self, exc: Exception\n ) -> typing.Optional[typing.Callable]:\n for cls in type(exc).__mro__:\n if cls in self._exception_handlers:\n return self._exception_handlers[cls]\n return None\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n response_started = False\n\n async def sender(message: Message) -> None:\n nonlocal response_started\n\n if message[\"type\"] == \"http.response.start\":\n response_started = True\n await send(message)\n\n try:\n await self.app(scope, receive, sender)\n except Exception as exc:\n handler = None\n\n if isinstance(exc, HTTPException):\n handler = self._status_handlers.get(exc.status_code)\n\n if handler is None:\n handler = self._lookup_exception_handler(exc)\n\n if handler is None:\n raise exc\n\n if response_started:\n msg = \"Caught handled 
exception, but response already started.\"\n raise RuntimeError(msg) from exc\n\n request = Request(scope, receive=receive)\n if asyncio.iscoroutinefunction(handler):\n response = await handler(request, exc)\n else:\n response = await run_in_threadpool(handler, request, exc)\n await response(scope, receive, sender)\n\n def http_exception(self, request: Request, exc: HTTPException) -> Response:\n if exc.status_code in {204, 304}:\n return Response(b\"\", status_code=exc.status_code)\n return PlainTextResponse(exc.detail, status_code=exc.status_code)\n", "path": "starlette/exceptions.py"}, {"content": "import typing\n\nfrom starlette.datastructures import State, URLPath\nfrom starlette.exceptions import ExceptionMiddleware\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.base import BaseHTTPMiddleware\nfrom starlette.middleware.errors import ServerErrorMiddleware\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import BaseRoute, Router\nfrom starlette.types import ASGIApp, Receive, Scope, Send\n\n\nclass Starlette:\n \"\"\"\n Creates an application instance.\n\n **Parameters:**\n\n * **debug** - Boolean indicating if debug tracebacks should be returned on errors.\n * **routes** - A list of routes to serve incoming HTTP and WebSocket requests.\n * **middleware** - A list of middleware to run for every request. A starlette\n application will always automatically include two middleware classes.\n `ServerErrorMiddleware` is added as the very outermost middleware, to handle\n any uncaught errors occurring anywhere in the entire stack.\n `ExceptionMiddleware` is added as the very innermost middleware, to deal\n with handled exception cases occurring in the routing or endpoints.\n * **exception_handlers** - A mapping of either integer status codes,\n or exception class types onto callables which handle the exceptions.\n Exception handler callables should be of the form\n `handler(request, exc) -> response` and may be be either standard functions, or\n async functions.\n * **on_startup** - A list of callables to run on application startup.\n Startup handler callables do not take any arguments, and may be be either\n standard functions, or async functions.\n * **on_shutdown** - A list of callables to run on application shutdown.\n Shutdown handler callables do not take any arguments, and may be be either\n standard functions, or async functions.\n \"\"\"\n\n def __init__(\n self,\n debug: bool = False,\n routes: typing.Sequence[BaseRoute] = None,\n middleware: typing.Sequence[Middleware] = None,\n exception_handlers: typing.Mapping[\n typing.Any, typing.Callable[[Request, Exception], Response]\n ] = None,\n on_startup: typing.Sequence[typing.Callable] = None,\n on_shutdown: typing.Sequence[typing.Callable] = None,\n lifespan: typing.Callable[[\"Starlette\"], typing.AsyncContextManager] = None,\n ) -> None:\n # The lifespan context function is a newer style that replaces\n # on_startup / on_shutdown handlers. 
Use one or the other, not both.\n assert lifespan is None or (\n on_startup is None and on_shutdown is None\n ), \"Use either 'lifespan' or 'on_startup'/'on_shutdown', not both.\"\n\n self._debug = debug\n self.state = State()\n self.router = Router(\n routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan\n )\n self.exception_handlers = (\n {} if exception_handlers is None else dict(exception_handlers)\n )\n self.user_middleware = [] if middleware is None else list(middleware)\n self.middleware_stack = self.build_middleware_stack()\n\n def build_middleware_stack(self) -> ASGIApp:\n debug = self.debug\n error_handler = None\n exception_handlers: typing.Dict[\n typing.Any, typing.Callable[[Request, Exception], Response]\n ] = {}\n\n for key, value in self.exception_handlers.items():\n if key in (500, Exception):\n error_handler = value\n else:\n exception_handlers[key] = value\n\n middleware = (\n [Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)]\n + self.user_middleware\n + [\n Middleware(\n ExceptionMiddleware, handlers=exception_handlers, debug=debug\n )\n ]\n )\n\n app = self.router\n for cls, options in reversed(middleware):\n app = cls(app=app, **options)\n return app\n\n @property\n def routes(self) -> typing.List[BaseRoute]:\n return self.router.routes\n\n @property\n def debug(self) -> bool:\n return self._debug\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self._debug = value\n self.middleware_stack = self.build_middleware_stack()\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n return self.router.url_path_for(name, **path_params)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n scope[\"app\"] = self\n await self.middleware_stack(scope, receive, send)\n\n # The following usages are now discouraged in favour of configuration\n # \u00a0during Starlette.__init__(...)\n def on_event(self, event_type: str) -> typing.Callable:\n return self.router.on_event(event_type)\n\n def mount(self, path: str, app: ASGIApp, name: str = None) -> None:\n self.router.mount(path, app=app, name=name)\n\n def host(self, host: str, app: ASGIApp, name: str = None) -> None:\n self.router.host(host, app=app, name=name)\n\n def add_middleware(self, middleware_class: type, **options: typing.Any) -> None:\n self.user_middleware.insert(0, Middleware(middleware_class, **options))\n self.middleware_stack = self.build_middleware_stack()\n\n def add_exception_handler(\n self,\n exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],\n handler: typing.Callable,\n ) -> None:\n self.exception_handlers[exc_class_or_status_code] = handler\n self.middleware_stack = self.build_middleware_stack()\n\n def add_event_handler(self, event_type: str, func: typing.Callable) -> None:\n self.router.add_event_handler(event_type, func)\n\n def add_route(\n self,\n path: str,\n route: typing.Callable,\n methods: typing.List[str] = None,\n name: str = None,\n include_in_schema: bool = True,\n ) -> None:\n self.router.add_route(\n path, route, methods=methods, name=name, include_in_schema=include_in_schema\n )\n\n def add_websocket_route(\n self, path: str, route: typing.Callable, name: str = None\n ) -> None:\n self.router.add_websocket_route(path, route, name=name)\n\n def exception_handler(\n self, exc_class_or_status_code: typing.Union[int, typing.Type[Exception]]\n ) -> typing.Callable:\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_exception_handler(exc_class_or_status_code, 
func)\n return func\n\n return decorator\n\n def route(\n self,\n path: str,\n methods: typing.List[str] = None,\n name: str = None,\n include_in_schema: bool = True,\n ) -> typing.Callable:\n def decorator(func: typing.Callable) -> typing.Callable:\n self.router.add_route(\n path,\n func,\n methods=methods,\n name=name,\n include_in_schema=include_in_schema,\n )\n return func\n\n return decorator\n\n def websocket_route(self, path: str, name: str = None) -> typing.Callable:\n def decorator(func: typing.Callable) -> typing.Callable:\n self.router.add_websocket_route(path, func, name=name)\n return func\n\n return decorator\n\n def middleware(self, middleware_type: str) -> typing.Callable:\n assert (\n middleware_type == \"http\"\n ), 'Currently only middleware(\"http\") is supported.'\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_middleware(BaseHTTPMiddleware, dispatch=func)\n return func\n\n return decorator\n", "path": "starlette/applications.py"}]} | 3,907 | 705 |
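An aside on the Starlette record above: the constructor docstring in that listing describes `exception_handlers` as a mapping from integer status codes or exception classes to `handler(request, exc) -> response` callables, and the `build_middleware_stack` code routes `500`/`Exception` keys to the server-error handler. A minimal sketch of registering such handlers, assuming standard Starlette imports (handler names and JSON payloads are illustrative only, not part of the record):

```python
from starlette.applications import Starlette
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse


async def not_found(request: Request, exc: HTTPException) -> JSONResponse:
    # Status-code key: invoked when an HTTPException(404) reaches ExceptionMiddleware
    return JSONResponse({"detail": exc.detail}, status_code=exc.status_code)


async def server_error(request: Request, exc: Exception) -> JSONResponse:
    # Exception-class key: 500/Exception handlers are passed to ServerErrorMiddleware
    return JSONResponse({"detail": "internal server error"}, status_code=500)


app = Starlette(
    exception_handlers={
        404: not_found,
        Exception: server_error,
    },
)
```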
gh_patches_debug_1273 | rasdani/github-patches | git_diff | ivy-llc__ivy-17162 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
is_integer
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/attribute.py`
Content:
```
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6
7
8 @to_ivy_arrays_and_back
9 def is_complex(x):
10 return ivy.is_complex_dtype(x)
11
12
13 @to_ivy_arrays_and_back
14 def is_floating_point(x):
15 return ivy.is_float_dtype(x)
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/attribute.py b/ivy/functional/frontends/paddle/tensor/attribute.py
--- a/ivy/functional/frontends/paddle/tensor/attribute.py
+++ b/ivy/functional/frontends/paddle/tensor/attribute.py
@@ -10,6 +10,11 @@
return ivy.is_complex_dtype(x)
+@to_ivy_arrays_and_back
+def is_integer(x):
+ return ivy.is_int_dtype(x)
+
+
@to_ivy_arrays_and_back
def is_floating_point(x):
return ivy.is_float_dtype(x)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/attribute.py b/ivy/functional/frontends/paddle/tensor/attribute.py\n--- a/ivy/functional/frontends/paddle/tensor/attribute.py\n+++ b/ivy/functional/frontends/paddle/tensor/attribute.py\n@@ -10,6 +10,11 @@\n return ivy.is_complex_dtype(x)\n \n \n+@to_ivy_arrays_and_back\n+def is_integer(x):\n+ return ivy.is_int_dtype(x)\n+\n+\n @to_ivy_arrays_and_back\n def is_floating_point(x):\n return ivy.is_float_dtype(x)\n", "issue": "is_integer\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef is_complex(x):\n return ivy.is_complex_dtype(x)\n\n\n@to_ivy_arrays_and_back\ndef is_floating_point(x):\n return ivy.is_float_dtype(x)\n", "path": "ivy/functional/frontends/paddle/tensor/attribute.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef is_complex(x):\n return ivy.is_complex_dtype(x)\n\n\n@to_ivy_arrays_and_back\ndef is_integer(x):\n return ivy.is_int_dtype(x)\n\n\n@to_ivy_arrays_and_back\ndef is_floating_point(x):\n return ivy.is_float_dtype(x)\n", "path": "ivy/functional/frontends/paddle/tensor/attribute.py"}]} | 370 | 138 |
gh_patches_debug_28376 | rasdani/github-patches | git_diff | iterative__dvc-4075 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement `--no-exec` option for `import-url` command
`dvc import-url` creates a new `.dvc` file, just as `dvc run` does. Sometimes the files to be imported are already present locally, and it is quite inconvenient that they have to be downloaded again just to create a pipeline step.
Because of that it would be great to add a `--no-exec` option: we create the pipeline step, then use `dvc commit` to update its md5 with the already downloaded file.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/repo/imp_url.py`
Content:
```
1 import os
2
3 from dvc.repo.scm_context import scm_context
4 from dvc.utils import relpath, resolve_output, resolve_paths
5 from dvc.utils.fs import path_isin
6
7 from ..exceptions import OutputDuplicationError
8 from . import locked
9
10
11 @locked
12 @scm_context
13 def imp_url(self, url, out=None, fname=None, erepo=None, frozen=True):
14 from dvc.dvcfile import Dvcfile
15 from dvc.stage import Stage, create_stage
16
17 out = resolve_output(url, out)
18 path, wdir, out = resolve_paths(self, out)
19
20 # NOTE: when user is importing something from within their own repository
21 if (
22 erepo is None
23 and os.path.exists(url)
24 and path_isin(os.path.abspath(url), self.root_dir)
25 ):
26 url = relpath(url, wdir)
27
28 stage = create_stage(
29 Stage,
30 self,
31 fname or path,
32 wdir=wdir,
33 deps=[url],
34 outs=[out],
35 erepo=erepo,
36 )
37
38 if stage is None:
39 return None
40
41 dvcfile = Dvcfile(self, stage.path)
42 dvcfile.remove()
43
44 try:
45 self.check_modified_graph([stage])
46 except OutputDuplicationError as exc:
47 raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})
48
49 stage.run()
50
51 stage.frozen = frozen
52
53 dvcfile.dump(stage)
54
55 return stage
56
```
Path: `dvc/command/imp_url.py`
Content:
```
1 import argparse
2 import logging
3
4 from dvc.command import completion
5 from dvc.command.base import CmdBase, append_doc_link
6 from dvc.exceptions import DvcException
7
8 logger = logging.getLogger(__name__)
9
10
11 class CmdImportUrl(CmdBase):
12 def run(self):
13 try:
14 self.repo.imp_url(
15 self.args.url, out=self.args.out, fname=self.args.file
16 )
17 except DvcException:
18 logger.exception(
19 "failed to import {}. You could also try downloading "
20 "it manually, and adding it with `dvc add`.".format(
21 self.args.url
22 )
23 )
24 return 1
25 return 0
26
27
28 def add_parser(subparsers, parent_parser):
29 IMPORT_HELP = (
30 "Download or copy file from URL and take it under DVC control."
31 )
32
33 import_parser = subparsers.add_parser(
34 "import-url",
35 parents=[parent_parser],
36 description=append_doc_link(IMPORT_HELP, "import-url"),
37 help=IMPORT_HELP,
38 formatter_class=argparse.RawTextHelpFormatter,
39 )
40 import_parser.add_argument(
41 "url",
42 help="Supported urls:\n"
43 "/path/to/file\n"
44 "/path/to/directory\n"
45 "C:\\\\path\\to\\file\n"
46 "C:\\\\path\\to\\directory\n"
47 "https://example.com/path/to/file\n"
48 "s3://bucket/path/to/file\n"
49 "s3://bucket/path/to/directory\n"
50 "gs://bucket/path/to/file\n"
51 "gs://bucket/path/to/directory\n"
52 "hdfs://example.com/path/to/file\n"
53 "ssh://example.com:/path/to/file\n"
54 "ssh://example.com:/path/to/directory\n"
55 "remote://myremote/path/to/file (see `dvc remote`)",
56 )
57 import_parser.add_argument(
58 "out",
59 nargs="?",
60 help="Destination path to put files to.",
61 choices=completion.Optional.DIR,
62 )
63 import_parser.add_argument(
64 "--file",
65 help="Specify name of the DVC-file this command will generate.",
66 metavar="<filename>",
67 choices=completion.Optional.DIR,
68 )
69 import_parser.set_defaults(func=CmdImportUrl)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/command/imp_url.py b/dvc/command/imp_url.py
--- a/dvc/command/imp_url.py
+++ b/dvc/command/imp_url.py
@@ -12,7 +12,10 @@
def run(self):
try:
self.repo.imp_url(
- self.args.url, out=self.args.out, fname=self.args.file
+ self.args.url,
+ out=self.args.out,
+ fname=self.args.file,
+ no_exec=self.args.no_exec,
)
except DvcException:
logger.exception(
@@ -66,4 +69,10 @@
metavar="<filename>",
choices=completion.Optional.DIR,
)
+ import_parser.add_argument(
+ "--no-exec",
+ action="store_true",
+ default=False,
+ help="Only create stage file without actually download it.",
+ )
import_parser.set_defaults(func=CmdImportUrl)
diff --git a/dvc/repo/imp_url.py b/dvc/repo/imp_url.py
--- a/dvc/repo/imp_url.py
+++ b/dvc/repo/imp_url.py
@@ -10,7 +10,9 @@
@locked
@scm_context
-def imp_url(self, url, out=None, fname=None, erepo=None, frozen=True):
+def imp_url(
+ self, url, out=None, fname=None, erepo=None, frozen=True, no_exec=False
+):
from dvc.dvcfile import Dvcfile
from dvc.stage import Stage, create_stage
@@ -46,7 +48,10 @@
except OutputDuplicationError as exc:
raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})
- stage.run()
+ if no_exec:
+ stage.ignore_outs()
+ else:
+ stage.run()
stage.frozen = frozen
| {"golden_diff": "diff --git a/dvc/command/imp_url.py b/dvc/command/imp_url.py\n--- a/dvc/command/imp_url.py\n+++ b/dvc/command/imp_url.py\n@@ -12,7 +12,10 @@\n def run(self):\n try:\n self.repo.imp_url(\n- self.args.url, out=self.args.out, fname=self.args.file\n+ self.args.url,\n+ out=self.args.out,\n+ fname=self.args.file,\n+ no_exec=self.args.no_exec,\n )\n except DvcException:\n logger.exception(\n@@ -66,4 +69,10 @@\n metavar=\"<filename>\",\n choices=completion.Optional.DIR,\n )\n+ import_parser.add_argument(\n+ \"--no-exec\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Only create stage file without actually download it.\",\n+ )\n import_parser.set_defaults(func=CmdImportUrl)\ndiff --git a/dvc/repo/imp_url.py b/dvc/repo/imp_url.py\n--- a/dvc/repo/imp_url.py\n+++ b/dvc/repo/imp_url.py\n@@ -10,7 +10,9 @@\n \n @locked\n @scm_context\n-def imp_url(self, url, out=None, fname=None, erepo=None, frozen=True):\n+def imp_url(\n+ self, url, out=None, fname=None, erepo=None, frozen=True, no_exec=False\n+):\n from dvc.dvcfile import Dvcfile\n from dvc.stage import Stage, create_stage\n \n@@ -46,7 +48,10 @@\n except OutputDuplicationError as exc:\n raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})\n \n- stage.run()\n+ if no_exec:\n+ stage.ignore_outs()\n+ else:\n+ stage.run()\n \n stage.frozen = frozen\n", "issue": "Implement `--no-exec` option for `import-url` command\n`dvc import-url` creates new `.dvc` file, just as `dvc run`. Sometimes files which would be imported are already present locally and it's quite inconvenient that they should be downloaded again in order to create a pipeline step.\r\n\r\nBecause of that it would be great to add `--no-exec` option: we create pipeline step, then use `dvc commit` to update its md5 with already downloaded file.\n", "before_files": [{"content": "import os\n\nfrom dvc.repo.scm_context import scm_context\nfrom dvc.utils import relpath, resolve_output, resolve_paths\nfrom dvc.utils.fs import path_isin\n\nfrom ..exceptions import OutputDuplicationError\nfrom . import locked\n\n\n@locked\n@scm_context\ndef imp_url(self, url, out=None, fname=None, erepo=None, frozen=True):\n from dvc.dvcfile import Dvcfile\n from dvc.stage import Stage, create_stage\n\n out = resolve_output(url, out)\n path, wdir, out = resolve_paths(self, out)\n\n # NOTE: when user is importing something from within their own repository\n if (\n erepo is None\n and os.path.exists(url)\n and path_isin(os.path.abspath(url), self.root_dir)\n ):\n url = relpath(url, wdir)\n\n stage = create_stage(\n Stage,\n self,\n fname or path,\n wdir=wdir,\n deps=[url],\n outs=[out],\n erepo=erepo,\n )\n\n if stage is None:\n return None\n\n dvcfile = Dvcfile(self, stage.path)\n dvcfile.remove()\n\n try:\n self.check_modified_graph([stage])\n except OutputDuplicationError as exc:\n raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})\n\n stage.run()\n\n stage.frozen = frozen\n\n dvcfile.dump(stage)\n\n return stage\n", "path": "dvc/repo/imp_url.py"}, {"content": "import argparse\nimport logging\n\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdImportUrl(CmdBase):\n def run(self):\n try:\n self.repo.imp_url(\n self.args.url, out=self.args.out, fname=self.args.file\n )\n except DvcException:\n logger.exception(\n \"failed to import {}. 
You could also try downloading \"\n \"it manually, and adding it with `dvc add`.\".format(\n self.args.url\n )\n )\n return 1\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n IMPORT_HELP = (\n \"Download or copy file from URL and take it under DVC control.\"\n )\n\n import_parser = subparsers.add_parser(\n \"import-url\",\n parents=[parent_parser],\n description=append_doc_link(IMPORT_HELP, \"import-url\"),\n help=IMPORT_HELP,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n import_parser.add_argument(\n \"url\",\n help=\"Supported urls:\\n\"\n \"/path/to/file\\n\"\n \"/path/to/directory\\n\"\n \"C:\\\\\\\\path\\\\to\\\\file\\n\"\n \"C:\\\\\\\\path\\\\to\\\\directory\\n\"\n \"https://example.com/path/to/file\\n\"\n \"s3://bucket/path/to/file\\n\"\n \"s3://bucket/path/to/directory\\n\"\n \"gs://bucket/path/to/file\\n\"\n \"gs://bucket/path/to/directory\\n\"\n \"hdfs://example.com/path/to/file\\n\"\n \"ssh://example.com:/path/to/file\\n\"\n \"ssh://example.com:/path/to/directory\\n\"\n \"remote://myremote/path/to/file (see `dvc remote`)\",\n )\n import_parser.add_argument(\n \"out\",\n nargs=\"?\",\n help=\"Destination path to put files to.\",\n choices=completion.Optional.DIR,\n )\n import_parser.add_argument(\n \"--file\",\n help=\"Specify name of the DVC-file this command will generate.\",\n metavar=\"<filename>\",\n choices=completion.Optional.DIR,\n )\n import_parser.set_defaults(func=CmdImportUrl)\n", "path": "dvc/command/imp_url.py"}], "after_files": [{"content": "import os\n\nfrom dvc.repo.scm_context import scm_context\nfrom dvc.utils import relpath, resolve_output, resolve_paths\nfrom dvc.utils.fs import path_isin\n\nfrom ..exceptions import OutputDuplicationError\nfrom . import locked\n\n\n@locked\n@scm_context\ndef imp_url(\n self, url, out=None, fname=None, erepo=None, frozen=True, no_exec=False\n):\n from dvc.dvcfile import Dvcfile\n from dvc.stage import Stage, create_stage\n\n out = resolve_output(url, out)\n path, wdir, out = resolve_paths(self, out)\n\n # NOTE: when user is importing something from within their own repository\n if (\n erepo is None\n and os.path.exists(url)\n and path_isin(os.path.abspath(url), self.root_dir)\n ):\n url = relpath(url, wdir)\n\n stage = create_stage(\n Stage,\n self,\n fname or path,\n wdir=wdir,\n deps=[url],\n outs=[out],\n erepo=erepo,\n )\n\n if stage is None:\n return None\n\n dvcfile = Dvcfile(self, stage.path)\n dvcfile.remove()\n\n try:\n self.check_modified_graph([stage])\n except OutputDuplicationError as exc:\n raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})\n\n if no_exec:\n stage.ignore_outs()\n else:\n stage.run()\n\n stage.frozen = frozen\n\n dvcfile.dump(stage)\n\n return stage\n", "path": "dvc/repo/imp_url.py"}, {"content": "import argparse\nimport logging\n\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdImportUrl(CmdBase):\n def run(self):\n try:\n self.repo.imp_url(\n self.args.url,\n out=self.args.out,\n fname=self.args.file,\n no_exec=self.args.no_exec,\n )\n except DvcException:\n logger.exception(\n \"failed to import {}. 
You could also try downloading \"\n \"it manually, and adding it with `dvc add`.\".format(\n self.args.url\n )\n )\n return 1\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n IMPORT_HELP = (\n \"Download or copy file from URL and take it under DVC control.\"\n )\n\n import_parser = subparsers.add_parser(\n \"import-url\",\n parents=[parent_parser],\n description=append_doc_link(IMPORT_HELP, \"import-url\"),\n help=IMPORT_HELP,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n import_parser.add_argument(\n \"url\",\n help=\"Supported urls:\\n\"\n \"/path/to/file\\n\"\n \"/path/to/directory\\n\"\n \"C:\\\\\\\\path\\\\to\\\\file\\n\"\n \"C:\\\\\\\\path\\\\to\\\\directory\\n\"\n \"https://example.com/path/to/file\\n\"\n \"s3://bucket/path/to/file\\n\"\n \"s3://bucket/path/to/directory\\n\"\n \"gs://bucket/path/to/file\\n\"\n \"gs://bucket/path/to/directory\\n\"\n \"hdfs://example.com/path/to/file\\n\"\n \"ssh://example.com:/path/to/file\\n\"\n \"ssh://example.com:/path/to/directory\\n\"\n \"remote://myremote/path/to/file (see `dvc remote`)\",\n )\n import_parser.add_argument(\n \"out\",\n nargs=\"?\",\n help=\"Destination path to put files to.\",\n choices=completion.Optional.DIR,\n )\n import_parser.add_argument(\n \"--file\",\n help=\"Specify name of the DVC-file this command will generate.\",\n metavar=\"<filename>\",\n choices=completion.Optional.DIR,\n )\n import_parser.add_argument(\n \"--no-exec\",\n action=\"store_true\",\n default=False,\n help=\"Only create stage file without actually download it.\",\n )\n import_parser.set_defaults(func=CmdImportUrl)\n", "path": "dvc/command/imp_url.py"}]} | 1,428 | 418 |
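To make the `--no-exec` workflow from the issue and patch above concrete, here is a rough sketch through DVC's Python API, using the `no_exec` keyword the diff adds to `imp_url`; the URL, output path, and working directory are hypothetical:

```python
from dvc.repo import Repo

repo = Repo(".")  # assumes the current directory is an initialized DVC repository

# With no_exec=True the stage (.dvc file) is created but nothing is downloaded,
# which is exactly the behaviour the new --no-exec CLI flag exposes.
stage = repo.imp_url(
    "https://example.com/data/train.csv",  # hypothetical source URL
    out="train.csv",
    no_exec=True,
)
print(stage.path)

# Once train.csv is already present locally, running `dvc commit train.csv.dvc`
# records its checksum into the stage file, as described in the issue.
```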
gh_patches_debug_37644 | rasdani/github-patches | git_diff | SigmaHQ__sigma-257 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add possibility to configure backend options in conversion configuration file
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/sigma/backends/base.py`
Content:
```
1 # Output backends for sigmac
2 # Copyright 2016-2018 Thomas Patzke, Florian Roth
3
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Lesser General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Lesser General Public License for more details.
13
14 # You should have received a copy of the GNU Lesser General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import sigma
18 from .mixins import RulenameCommentMixin, QuoteCharMixin
19
20 class BackendOptions(dict):
21 """Object contains all options that should be passed to the backend from command line (or other user interfaces)"""
22
23 def __init__(self, options):
24 """
25 Receives the argparser result from the backend option paramater value list (nargs=*) and builds the dict from it. There are two option types:
26
27 * key=value: self{key} = value
28 * key: self{key} = True
29 """
30 if options == None:
31 return
32 for option in options:
33 parsed = option.split("=", 1)
34 try:
35 self[parsed[0]] = parsed[1]
36 except IndexError:
37 self[parsed[0]] = True
38
39 ### Generic backend base classes
40 class BaseBackend:
41 """Base class for all backends"""
42 identifier = "base"
43 active = False
44 index_field = None # field name that is used to address indices
45 file_list = None
46 options = tuple() # a list of tuples with following elements: option name, default value, help text, target attribute name (option name if None)
47
48 def __init__(self, sigmaconfig, backend_options=None):
49 """
50 Initialize backend. This gets a sigmaconfig object, which is notified about the used backend class by
51 passing the object instance to it.
52 """
53 super().__init__()
54 if not isinstance(sigmaconfig, (sigma.configuration.SigmaConfiguration, None)):
55 raise TypeError("SigmaConfiguration object expected")
56 self.backend_options = backend_options
57 self.sigmaconfig = sigmaconfig
58 self.sigmaconfig.set_backend(self)
59
60 # Parse options
61 for option, default_value, _, target in self.options:
62 if target is None:
63 target = option
64 setattr(self, target, self.backend_options.setdefault(option, default_value))
65
66 def generate(self, sigmaparser):
67 """Method is called for each sigma rule and receives the parsed rule (SigmaParser)"""
68 for parsed in sigmaparser.condparsed:
69 query = self.generateQuery(parsed)
70 before = self.generateBefore(parsed)
71 after = self.generateAfter(parsed)
72
73 result = ""
74 if before is not None:
75 result = before
76 if query is not None:
77 result += query
78 if after is not None:
79 result += after
80
81 return result
82
83 def generateQuery(self, parsed):
84 result = self.generateNode(parsed.parsedSearch)
85 if parsed.parsedAgg:
86 result += self.generateAggregation(parsed.parsedAgg)
87 return result
88
89 def generateNode(self, node):
90 if type(node) == sigma.parser.condition.ConditionAND:
91 return self.generateANDNode(node)
92 elif type(node) == sigma.parser.condition.ConditionOR:
93 return self.generateORNode(node)
94 elif type(node) == sigma.parser.condition.ConditionNOT:
95 return self.generateNOTNode(node)
96 elif type(node) == sigma.parser.condition.ConditionNULLValue:
97 return self.generateNULLValueNode(node)
98 elif type(node) == sigma.parser.condition.ConditionNotNULLValue:
99 return self.generateNotNULLValueNode(node)
100 elif type(node) == sigma.parser.condition.NodeSubexpression:
101 return self.generateSubexpressionNode(node)
102 elif type(node) == tuple:
103 return self.generateMapItemNode(node)
104 elif type(node) in (str, int):
105 return self.generateValueNode(node)
106 elif type(node) == list:
107 return self.generateListNode(node)
108 else:
109 raise TypeError("Node type %s was not expected in Sigma parse tree" % (str(type(node))))
110
111 def generateANDNode(self, node):
112 raise NotImplementedError("Node type not implemented for this backend")
113
114 def generateORNode(self, node):
115 raise NotImplementedError("Node type not implemented for this backend")
116
117 def generateNOTNode(self, node):
118 raise NotImplementedError("Node type not implemented for this backend")
119
120 def generateSubexpressionNode(self, node):
121 raise NotImplementedError("Node type not implemented for this backend")
122
123 def generateListNode(self, node):
124 raise NotImplementedError("Node type not implemented for this backend")
125
126 def generateMapItemNode(self, node):
127 raise NotImplementedError("Node type not implemented for this backend")
128
129 def generateValueNode(self, node):
130 raise NotImplementedError("Node type not implemented for this backend")
131
132 def generateNULLValueNode(self, node):
133 raise NotImplementedError("Node type not implemented for this backend")
134
135 def generateNotNULLValueNode(self, node):
136 raise NotImplementedError("Node type not implemented for this backend")
137
138 def generateAggregation(self, agg):
139 raise NotImplementedError("Aggregations not implemented for this backend")
140
141 def generateBefore(self, parsed):
142 return ""
143
144 def generateAfter(self, parsed):
145 return ""
146
147 def finalize(self):
148 """
149 Is called after the last file was processed with generate(). The right place if this backend is not intended to
150 look isolated at each rule, but generates an output which incorporates multiple rules, e.g. dashboards.
151 """
152 pass
153
154 class SingleTextQueryBackend(RulenameCommentMixin, BaseBackend, QuoteCharMixin):
155 """Base class for backends that generate one text-based expression from a Sigma rule"""
156 identifier = "base-textquery"
157 active = False
158
159 # the following class variables define the generation and behavior of queries from a parse tree some are prefilled with default values that are quite usual
160 andToken = None # Token used for linking expressions with logical AND
161 orToken = None # Same for OR
162 notToken = None # Same for NOT
163 subExpression = None # Syntax for subexpressions, usually parenthesis around it. %s is inner expression
164 listExpression = None # Syntax for lists, %s are list items separated with listSeparator
165 listSeparator = None # Character for separation of list items
166 valueExpression = None # Expression of values, %s represents value
167 nullExpression = None # Expression of queries for null values or non-existing fields. %s is field name
168 notNullExpression = None # Expression of queries for not null values. %s is field name
169 mapExpression = None # Syntax for field/value conditions. First %s is fieldname, second is value
170 mapListsSpecialHandling = False # Same handling for map items with list values as for normal values (strings, integers) if True, generateMapItemListNode method is called with node
171 mapListValueExpression = None # Syntax for field/value condititons where map value is a list
172
173 def generateANDNode(self, node):
174 generated = [ self.generateNode(val) for val in node ]
175 filtered = [ g for g in generated if g is not None ]
176 if filtered:
177 return self.andToken.join(filtered)
178 else:
179 return None
180
181 def generateORNode(self, node):
182 generated = [ self.generateNode(val) for val in node ]
183 filtered = [ g for g in generated if g is not None ]
184 if filtered:
185 return self.orToken.join(filtered)
186 else:
187 return None
188
189 def generateNOTNode(self, node):
190 generated = self.generateNode(node.item)
191 if generated is not None:
192 return self.notToken + generated
193 else:
194 return None
195
196 def generateSubexpressionNode(self, node):
197 generated = self.generateNode(node.items)
198 if generated:
199 return self.subExpression % generated
200 else:
201 return None
202
203 def generateListNode(self, node):
204 if not set([type(value) for value in node]).issubset({str, int}):
205 raise TypeError("List values must be strings or numbers")
206 return self.listExpression % (self.listSeparator.join([self.generateNode(value) for value in node]))
207
208 def generateMapItemNode(self, node):
209 fieldname, value = node
210
211 transformed_fieldname = self.fieldNameMapping(fieldname, value)
212 if self.mapListsSpecialHandling == False and type(value) in (str, int, list) or self.mapListsSpecialHandling == True and type(value) in (str, int):
213 return self.mapExpression % (transformed_fieldname, self.generateNode(value))
214 elif type(value) == list:
215 return self.generateMapItemListNode(transformed_fieldname, value)
216 else:
217 raise TypeError("Backend does not support map values of type " + str(type(value)))
218
219 def generateMapItemListNode(self, fieldname, value):
220 return self.mapListValueExpression % (fieldname, self.generateNode(value))
221
222 def generateValueNode(self, node):
223 return self.valueExpression % (self.cleanValue(str(node)))
224
225 def generateNULLValueNode(self, node):
226 return self.nullExpression % (node.item)
227
228 def generateNotNULLValueNode(self, node):
229 return self.notNullExpression % (node.item)
230
231 def fieldNameMapping(self, fieldname, value):
232 """
233 Alter field names depending on the value(s). Backends may use this method to perform a final transformation of the field name
234 in addition to the field mapping defined in the conversion configuration. The field name passed to this method was already
235 transformed from the original name given in the Sigma rule.
236 """
237 return fieldname
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/sigma/backends/base.py b/tools/sigma/backends/base.py
--- a/tools/sigma/backends/base.py
+++ b/tools/sigma/backends/base.py
@@ -14,28 +14,73 @@
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import sys
+
import sigma
+import yaml
+
from .mixins import RulenameCommentMixin, QuoteCharMixin
+
class BackendOptions(dict):
- """Object contains all options that should be passed to the backend from command line (or other user interfaces)"""
+ """
+ Object containing all the options that should be passed to the backend.
+
+ The options can come from command line and a YAML configuration file, and will be merged together.
+ Options from the command line take precedence.
+ """
+
+ def __init__(self, options, config_file):
+ """
+ :param options: unparsed options coming from the CLI
+ :param config_file: path to a YAML configuration file
+ """
+
+ self._load_config_file(config_file)
+ self._parse_options(options)
- def __init__(self, options):
+ def _parse_options(self, options):
"""
- Receives the argparser result from the backend option paramater value list (nargs=*) and builds the dict from it. There are two option types:
+ Populates options from the unparsed options of the CLI
- * key=value: self{key} = value
- * key: self{key} = True
+ :param options: list unparsed options from the CLI.
+ Each option can have one of the following formats:
+ - "key=value": the option key:value will be passed to the backend
+ - "key": the option key:True will be passed to the backend
"""
- if options == None:
+
+ if options is None:
return
+
for option in options:
parsed = option.split("=", 1)
try:
self[parsed[0]] = parsed[1]
except IndexError:
+ # If the option is present but doesn't map to a value, treat it as a boolean flag
self[parsed[0]] = True
+ def _load_config_file(self, path):
+ """
+ Populates options from a configuration file
+
+ :param path: Path to the configuration file
+ """
+ if path is None:
+ return
+
+ try:
+ with open(path, 'r') as config_file:
+ backend_config = yaml.safe_load(config_file.read())
+ for key in backend_config:
+ self[key] = backend_config[key]
+ except (IOError, OSError) as e:
+ print("Failed to open backend configuration file '%s': %s" % (path, str(e)), file=sys.stderr)
+ exit(1)
+ except yaml.YAMLError as e:
+ print("Failed to parse backend configuration file '%s' as valid YAML: %s" % (path, str(e)), file=sys.stderr)
+ exit(1)
+
### Generic backend base classes
class BaseBackend:
"""Base class for all backends"""
| {"golden_diff": "diff --git a/tools/sigma/backends/base.py b/tools/sigma/backends/base.py\n--- a/tools/sigma/backends/base.py\n+++ b/tools/sigma/backends/base.py\n@@ -14,28 +14,73 @@\n # You should have received a copy of the GNU Lesser General Public License\n # along with this program. If not, see <http://www.gnu.org/licenses/>.\n \n+import sys\n+\n import sigma\n+import yaml\n+\n from .mixins import RulenameCommentMixin, QuoteCharMixin\n \n+\n class BackendOptions(dict):\n- \"\"\"Object contains all options that should be passed to the backend from command line (or other user interfaces)\"\"\"\n+ \"\"\"\n+ Object containing all the options that should be passed to the backend.\n+ \n+ The options can come from command line and a YAML configuration file, and will be merged together.\n+ Options from the command line take precedence.\n+ \"\"\"\n+\n+ def __init__(self, options, config_file):\n+ \"\"\"\n+ :param options: unparsed options coming from the CLI\n+ :param config_file: path to a YAML configuration file\n+ \"\"\"\n+\n+ self._load_config_file(config_file)\n+ self._parse_options(options)\n \n- def __init__(self, options):\n+ def _parse_options(self, options):\n \"\"\"\n- Receives the argparser result from the backend option paramater value list (nargs=*) and builds the dict from it. There are two option types:\n+ Populates options from the unparsed options of the CLI\n \n- * key=value: self{key} = value\n- * key: self{key} = True\n+ :param options: list unparsed options from the CLI.\n+ Each option can have one of the following formats:\n+ - \"key=value\": the option key:value will be passed to the backend\n+ - \"key\": the option key:True will be passed to the backend\n \"\"\"\n- if options == None:\n+\n+ if options is None:\n return\n+\n for option in options:\n parsed = option.split(\"=\", 1)\n try:\n self[parsed[0]] = parsed[1]\n except IndexError:\n+ # If the option is present but doesn't map to a value, treat it as a boolean flag\n self[parsed[0]] = True\n \n+ def _load_config_file(self, path):\n+ \"\"\"\n+ Populates options from a configuration file\n+\n+ :param path: Path to the configuration file\n+ \"\"\"\n+ if path is None:\n+ return\n+\n+ try:\n+ with open(path, 'r') as config_file:\n+ backend_config = yaml.safe_load(config_file.read())\n+ for key in backend_config:\n+ self[key] = backend_config[key]\n+ except (IOError, OSError) as e:\n+ print(\"Failed to open backend configuration file '%s': %s\" % (path, str(e)), file=sys.stderr)\n+ exit(1)\n+ except yaml.YAMLError as e:\n+ print(\"Failed to parse backend configuration file '%s' as valid YAML: %s\" % (path, str(e)), file=sys.stderr)\n+ exit(1)\n+\n ### Generic backend base classes\n class BaseBackend:\n \"\"\"Base class for all backends\"\"\"\n", "issue": "Add possibility to configure backend options in conversion configuration file\n\n", "before_files": [{"content": "# Output backends for sigmac\n# Copyright 2016-2018 Thomas Patzke, Florian Roth\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser General Public License for more details.\n\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport sigma\nfrom .mixins import RulenameCommentMixin, QuoteCharMixin\n\nclass BackendOptions(dict):\n \"\"\"Object contains all options that should be passed to the backend from command line (or other user interfaces)\"\"\"\n\n def __init__(self, options):\n \"\"\"\n Receives the argparser result from the backend option paramater value list (nargs=*) and builds the dict from it. There are two option types:\n\n * key=value: self{key} = value\n * key: self{key} = True\n \"\"\"\n if options == None:\n return\n for option in options:\n parsed = option.split(\"=\", 1)\n try:\n self[parsed[0]] = parsed[1]\n except IndexError:\n self[parsed[0]] = True\n\n### Generic backend base classes\nclass BaseBackend:\n \"\"\"Base class for all backends\"\"\"\n identifier = \"base\"\n active = False\n index_field = None # field name that is used to address indices\n file_list = None\n options = tuple() # a list of tuples with following elements: option name, default value, help text, target attribute name (option name if None)\n\n def __init__(self, sigmaconfig, backend_options=None):\n \"\"\"\n Initialize backend. This gets a sigmaconfig object, which is notified about the used backend class by\n passing the object instance to it.\n \"\"\"\n super().__init__()\n if not isinstance(sigmaconfig, (sigma.configuration.SigmaConfiguration, None)):\n raise TypeError(\"SigmaConfiguration object expected\")\n self.backend_options = backend_options\n self.sigmaconfig = sigmaconfig\n self.sigmaconfig.set_backend(self)\n\n # Parse options\n for option, default_value, _, target in self.options:\n if target is None:\n target = option\n setattr(self, target, self.backend_options.setdefault(option, default_value))\n\n def generate(self, sigmaparser):\n \"\"\"Method is called for each sigma rule and receives the parsed rule (SigmaParser)\"\"\"\n for parsed in sigmaparser.condparsed:\n query = self.generateQuery(parsed)\n before = self.generateBefore(parsed)\n after = self.generateAfter(parsed)\n\n result = \"\"\n if before is not None:\n result = before\n if query is not None:\n result += query\n if after is not None:\n result += after\n\n return result\n\n def generateQuery(self, parsed):\n result = self.generateNode(parsed.parsedSearch)\n if parsed.parsedAgg:\n result += self.generateAggregation(parsed.parsedAgg)\n return result\n\n def generateNode(self, node):\n if type(node) == sigma.parser.condition.ConditionAND:\n return self.generateANDNode(node)\n elif type(node) == sigma.parser.condition.ConditionOR:\n return self.generateORNode(node)\n elif type(node) == sigma.parser.condition.ConditionNOT:\n return self.generateNOTNode(node)\n elif type(node) == sigma.parser.condition.ConditionNULLValue:\n return self.generateNULLValueNode(node)\n elif type(node) == sigma.parser.condition.ConditionNotNULLValue:\n return self.generateNotNULLValueNode(node)\n elif type(node) == sigma.parser.condition.NodeSubexpression:\n return self.generateSubexpressionNode(node)\n elif type(node) == tuple:\n return self.generateMapItemNode(node)\n elif type(node) in (str, int):\n return self.generateValueNode(node)\n elif type(node) == list:\n return self.generateListNode(node)\n else:\n raise TypeError(\"Node type %s was not expected in Sigma parse tree\" % (str(type(node))))\n\n def generateANDNode(self, node):\n raise 
NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateORNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateNOTNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateSubexpressionNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateListNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateMapItemNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateValueNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateNULLValueNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateNotNULLValueNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateAggregation(self, agg):\n raise NotImplementedError(\"Aggregations not implemented for this backend\")\n\n def generateBefore(self, parsed):\n return \"\"\n\n def generateAfter(self, parsed):\n return \"\"\n\n def finalize(self):\n \"\"\"\n Is called after the last file was processed with generate(). The right place if this backend is not intended to\n look isolated at each rule, but generates an output which incorporates multiple rules, e.g. dashboards.\n \"\"\"\n pass\n\nclass SingleTextQueryBackend(RulenameCommentMixin, BaseBackend, QuoteCharMixin):\n \"\"\"Base class for backends that generate one text-based expression from a Sigma rule\"\"\"\n identifier = \"base-textquery\"\n active = False\n\n # the following class variables define the generation and behavior of queries from a parse tree some are prefilled with default values that are quite usual\n andToken = None # Token used for linking expressions with logical AND\n orToken = None # Same for OR\n notToken = None # Same for NOT\n subExpression = None # Syntax for subexpressions, usually parenthesis around it. %s is inner expression\n listExpression = None # Syntax for lists, %s are list items separated with listSeparator\n listSeparator = None # Character for separation of list items\n valueExpression = None # Expression of values, %s represents value\n nullExpression = None # Expression of queries for null values or non-existing fields. %s is field name\n notNullExpression = None # Expression of queries for not null values. %s is field name\n mapExpression = None # Syntax for field/value conditions. 
First %s is fieldname, second is value\n mapListsSpecialHandling = False # Same handling for map items with list values as for normal values (strings, integers) if True, generateMapItemListNode method is called with node\n mapListValueExpression = None # Syntax for field/value condititons where map value is a list\n\n def generateANDNode(self, node):\n generated = [ self.generateNode(val) for val in node ]\n filtered = [ g for g in generated if g is not None ]\n if filtered:\n return self.andToken.join(filtered)\n else:\n return None\n\n def generateORNode(self, node):\n generated = [ self.generateNode(val) for val in node ]\n filtered = [ g for g in generated if g is not None ]\n if filtered:\n return self.orToken.join(filtered)\n else:\n return None\n\n def generateNOTNode(self, node):\n generated = self.generateNode(node.item)\n if generated is not None:\n return self.notToken + generated\n else:\n return None\n\n def generateSubexpressionNode(self, node):\n generated = self.generateNode(node.items)\n if generated:\n return self.subExpression % generated\n else:\n return None\n\n def generateListNode(self, node):\n if not set([type(value) for value in node]).issubset({str, int}):\n raise TypeError(\"List values must be strings or numbers\")\n return self.listExpression % (self.listSeparator.join([self.generateNode(value) for value in node]))\n\n def generateMapItemNode(self, node):\n fieldname, value = node\n\n transformed_fieldname = self.fieldNameMapping(fieldname, value)\n if self.mapListsSpecialHandling == False and type(value) in (str, int, list) or self.mapListsSpecialHandling == True and type(value) in (str, int):\n return self.mapExpression % (transformed_fieldname, self.generateNode(value))\n elif type(value) == list:\n return self.generateMapItemListNode(transformed_fieldname, value)\n else:\n raise TypeError(\"Backend does not support map values of type \" + str(type(value)))\n\n def generateMapItemListNode(self, fieldname, value):\n return self.mapListValueExpression % (fieldname, self.generateNode(value))\n\n def generateValueNode(self, node):\n return self.valueExpression % (self.cleanValue(str(node)))\n\n def generateNULLValueNode(self, node):\n return self.nullExpression % (node.item)\n\n def generateNotNULLValueNode(self, node):\n return self.notNullExpression % (node.item)\n\n def fieldNameMapping(self, fieldname, value):\n \"\"\"\n Alter field names depending on the value(s). Backends may use this method to perform a final transformation of the field name\n in addition to the field mapping defined in the conversion configuration. The field name passed to this method was already\n transformed from the original name given in the Sigma rule.\n \"\"\"\n return fieldname\n", "path": "tools/sigma/backends/base.py"}], "after_files": [{"content": "# Output backends for sigmac\n# Copyright 2016-2018 Thomas Patzke, Florian Roth\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport sys\n\nimport sigma\nimport yaml\n\nfrom .mixins import RulenameCommentMixin, QuoteCharMixin\n\n\nclass BackendOptions(dict):\n \"\"\"\n Object containing all the options that should be passed to the backend.\n \n The options can come from command line and a YAML configuration file, and will be merged together.\n Options from the command line take precedence.\n \"\"\"\n\n def __init__(self, options, config_file):\n \"\"\"\n :param options: unparsed options coming from the CLI\n :param config_file: path to a YAML configuration file\n \"\"\"\n\n self._load_config_file(config_file)\n self._parse_options(options)\n\n def _parse_options(self, options):\n \"\"\"\n Populates options from the unparsed options of the CLI\n\n :param options: list unparsed options from the CLI.\n Each option can have one of the following formats:\n - \"key=value\": the option key:value will be passed to the backend\n - \"key\": the option key:True will be passed to the backend\n \"\"\"\n\n if options is None:\n return\n\n for option in options:\n parsed = option.split(\"=\", 1)\n try:\n self[parsed[0]] = parsed[1]\n except IndexError:\n # If the option is present but doesn't map to a value, treat it as a boolean flag\n self[parsed[0]] = True\n\n def _load_config_file(self, path):\n \"\"\"\n Populates options from a configuration file\n\n :param path: Path to the configuration file\n \"\"\"\n if path is None:\n return\n\n try:\n with open(path, 'r') as config_file:\n backend_config = yaml.safe_load(config_file.read())\n for key in backend_config:\n self[key] = backend_config[key]\n except (IOError, OSError) as e:\n print(\"Failed to open backend configuration file '%s': %s\" % (path, str(e)), file=sys.stderr)\n exit(1)\n except yaml.YAMLError as e:\n print(\"Failed to parse backend configuration file '%s' as valid YAML: %s\" % (path, str(e)), file=sys.stderr)\n exit(1)\n\n### Generic backend base classes\nclass BaseBackend:\n \"\"\"Base class for all backends\"\"\"\n identifier = \"base\"\n active = False\n index_field = None # field name that is used to address indices\n file_list = None\n options = tuple() # a list of tuples with following elements: option name, default value, help text, target attribute name (option name if None)\n\n def __init__(self, sigmaconfig, backend_options=None):\n \"\"\"\n Initialize backend. 
This gets a sigmaconfig object, which is notified about the used backend class by\n passing the object instance to it.\n \"\"\"\n super().__init__()\n if not isinstance(sigmaconfig, (sigma.configuration.SigmaConfiguration, None)):\n raise TypeError(\"SigmaConfiguration object expected\")\n self.backend_options = backend_options\n self.sigmaconfig = sigmaconfig\n self.sigmaconfig.set_backend(self)\n\n # Parse options\n for option, default_value, _, target in self.options:\n if target is None:\n target = option\n setattr(self, target, self.backend_options.setdefault(option, default_value))\n\n def generate(self, sigmaparser):\n \"\"\"Method is called for each sigma rule and receives the parsed rule (SigmaParser)\"\"\"\n for parsed in sigmaparser.condparsed:\n query = self.generateQuery(parsed)\n before = self.generateBefore(parsed)\n after = self.generateAfter(parsed)\n\n result = \"\"\n if before is not None:\n result = before\n if query is not None:\n result += query\n if after is not None:\n result += after\n\n return result\n\n def generateQuery(self, parsed):\n result = self.generateNode(parsed.parsedSearch)\n if parsed.parsedAgg:\n result += self.generateAggregation(parsed.parsedAgg)\n return result\n\n def generateNode(self, node):\n if type(node) == sigma.parser.condition.ConditionAND:\n return self.generateANDNode(node)\n elif type(node) == sigma.parser.condition.ConditionOR:\n return self.generateORNode(node)\n elif type(node) == sigma.parser.condition.ConditionNOT:\n return self.generateNOTNode(node)\n elif type(node) == sigma.parser.condition.ConditionNULLValue:\n return self.generateNULLValueNode(node)\n elif type(node) == sigma.parser.condition.ConditionNotNULLValue:\n return self.generateNotNULLValueNode(node)\n elif type(node) == sigma.parser.condition.NodeSubexpression:\n return self.generateSubexpressionNode(node)\n elif type(node) == tuple:\n return self.generateMapItemNode(node)\n elif type(node) in (str, int):\n return self.generateValueNode(node)\n elif type(node) == list:\n return self.generateListNode(node)\n else:\n raise TypeError(\"Node type %s was not expected in Sigma parse tree\" % (str(type(node))))\n\n def generateANDNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateORNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateNOTNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateSubexpressionNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateListNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateMapItemNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateValueNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateNULLValueNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateNotNULLValueNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateAggregation(self, agg):\n raise NotImplementedError(\"Aggregations not implemented for this backend\")\n\n def generateBefore(self, parsed):\n return \"\"\n\n def generateAfter(self, parsed):\n return \"\"\n\n def finalize(self):\n \"\"\"\n Is called after the last file was processed with generate(). 
The right place if this backend is not intended to\n look isolated at each rule, but generates an output which incorporates multiple rules, e.g. dashboards.\n \"\"\"\n pass\n\nclass SingleTextQueryBackend(RulenameCommentMixin, BaseBackend, QuoteCharMixin):\n \"\"\"Base class for backends that generate one text-based expression from a Sigma rule\"\"\"\n identifier = \"base-textquery\"\n active = False\n\n # the following class variables define the generation and behavior of queries from a parse tree some are prefilled with default values that are quite usual\n andToken = None # Token used for linking expressions with logical AND\n orToken = None # Same for OR\n notToken = None # Same for NOT\n subExpression = None # Syntax for subexpressions, usually parenthesis around it. %s is inner expression\n listExpression = None # Syntax for lists, %s are list items separated with listSeparator\n listSeparator = None # Character for separation of list items\n valueExpression = None # Expression of values, %s represents value\n nullExpression = None # Expression of queries for null values or non-existing fields. %s is field name\n notNullExpression = None # Expression of queries for not null values. %s is field name\n mapExpression = None # Syntax for field/value conditions. First %s is fieldname, second is value\n mapListsSpecialHandling = False # Same handling for map items with list values as for normal values (strings, integers) if True, generateMapItemListNode method is called with node\n mapListValueExpression = None # Syntax for field/value condititons where map value is a list\n\n def generateANDNode(self, node):\n generated = [ self.generateNode(val) for val in node ]\n filtered = [ g for g in generated if g is not None ]\n if filtered:\n return self.andToken.join(filtered)\n else:\n return None\n\n def generateORNode(self, node):\n generated = [ self.generateNode(val) for val in node ]\n filtered = [ g for g in generated if g is not None ]\n if filtered:\n return self.orToken.join(filtered)\n else:\n return None\n\n def generateNOTNode(self, node):\n generated = self.generateNode(node.item)\n if generated is not None:\n return self.notToken + generated\n else:\n return None\n\n def generateSubexpressionNode(self, node):\n generated = self.generateNode(node.items)\n if generated:\n return self.subExpression % generated\n else:\n return None\n\n def generateListNode(self, node):\n if not set([type(value) for value in node]).issubset({str, int}):\n raise TypeError(\"List values must be strings or numbers\")\n return self.listExpression % (self.listSeparator.join([self.generateNode(value) for value in node]))\n\n def generateMapItemNode(self, node):\n fieldname, value = node\n\n transformed_fieldname = self.fieldNameMapping(fieldname, value)\n if self.mapListsSpecialHandling == False and type(value) in (str, int, list) or self.mapListsSpecialHandling == True and type(value) in (str, int):\n return self.mapExpression % (transformed_fieldname, self.generateNode(value))\n elif type(value) == list:\n return self.generateMapItemListNode(transformed_fieldname, value)\n else:\n raise TypeError(\"Backend does not support map values of type \" + str(type(value)))\n\n def generateMapItemListNode(self, fieldname, value):\n return self.mapListValueExpression % (fieldname, self.generateNode(value))\n\n def generateValueNode(self, node):\n return self.valueExpression % (self.cleanValue(str(node)))\n\n def generateNULLValueNode(self, node):\n return self.nullExpression % (node.item)\n\n def 
generateNotNULLValueNode(self, node):\n return self.notNullExpression % (node.item)\n\n def fieldNameMapping(self, fieldname, value):\n \"\"\"\n Alter field names depending on the value(s). Backends may use this method to perform a final transformation of the field name\n in addition to the field mapping defined in the conversion configuration. The field name passed to this method was already\n transformed from the original name given in the Sigma rule.\n \"\"\"\n return fieldname\n", "path": "tools/sigma/backends/base.py"}]} | 3,028 | 726 |
gh_patches_debug_50783 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-3282 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error in NL API, 'Sentiment' object has no attribute 'score'
Upon executing the following code on GCE & locally I'm getting 'score' not found error locally while it works on GCE.
```
def analyze_text(text):
language_client = language.Client()
document = language_client.document_from_text(text)
annotations = document.annotate_text(include_sentiment=True)
print(annotations.sentiment.score)
print (annotations.sentiment.magnitude)
```
Only difference I can find is on GCE, the gcloud version is different than the local
**GCE gcloud**
gcloud --version
Google Cloud SDK 145.0.0
alpha 2017.02.21
app-engine-python 1.9.50
beta 2017.02.21
bq 2.0.24
bq-nix 2.0.24
core 2017.02.21
core-nix 2017.02.21
gcloud
gcloud-deps 2017.02.21
gcloud-deps-linux-x86_64 2017.02.21
gsutil 4.22
gsutil-nix 4.22
**Local gcloud**
gcloud --version
Google Cloud SDK 148.0.1
bq 2.0.24
bq-nix 2.0.24
core 2017.03.24
core-nix 2016.11.07
gcloud
gcloud-deps 2017.03.17
gcloud-deps-darwin-x86_64 2017.02.21
gsutil 4.23
gsutil-nix 4.19
Debugging locally, the response from NL API consist of polarity property and not score.
<img width="761" alt="image" src="https://cloud.githubusercontent.com/assets/1888535/24331449/62cbdbee-1252-11e7-9d29-8699df078fcf.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `language/google/cloud/language/sentiment.py`
Content:
```
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Definition for Google Cloud Natural Language API sentiment.
16
17 Sentiment is the response to an ``analyzeSentiment`` request.
18 """
19
20
21 class Sentiment(object):
22 """A Google Cloud Natural Language API sentiment object.
23
24 .. _Sentiment message: https://cloud.google.com/natural-language/\
25 reference/rest/v1/Sentiment
26 .. _Sentiment basics: https://cloud.google.com/natural-language/\
27 docs/basics#sentiment-analysis-values
28
29 See `Sentiment message`_ and `Sentiment basics`_.
30
31 :type score: float
32 :param score: Score of the sentiment in the ``[-1.0, 1.0]`` range.
33 Larger numbers represent more positive sentiments.
34
35 :type magnitude: float
36 :param magnitude: A non-negative number in the ``[0, +inf)`` range, which
37 represents the absolute magnitude of sentiment
38 regardless of score (positive or negative).
39 """
40
41 def __init__(self, score, magnitude):
42 self.score = score
43 self.magnitude = magnitude
44
45 @classmethod
46 def from_api_repr(cls, payload):
47 """Convert a Sentiment from the JSON API into a :class:`Sentiment`.
48
49 :param payload: dict
50 :type payload: The value from the backend.
51
52 :rtype: :class:`Sentiment`
53 :returns: The sentiment parsed from the API representation.
54 """
55 score = payload['score']
56 magnitude = payload['magnitude']
57 return cls(score, magnitude)
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/language/google/cloud/language/sentiment.py b/language/google/cloud/language/sentiment.py
--- a/language/google/cloud/language/sentiment.py
+++ b/language/google/cloud/language/sentiment.py
@@ -52,6 +52,6 @@
:rtype: :class:`Sentiment`
:returns: The sentiment parsed from the API representation.
"""
- score = payload['score']
+ score = payload.get('score', None)
magnitude = payload['magnitude']
return cls(score, magnitude)
| {"golden_diff": "diff --git a/language/google/cloud/language/sentiment.py b/language/google/cloud/language/sentiment.py\n--- a/language/google/cloud/language/sentiment.py\n+++ b/language/google/cloud/language/sentiment.py\n@@ -52,6 +52,6 @@\n :rtype: :class:`Sentiment`\n :returns: The sentiment parsed from the API representation.\n \"\"\"\n- score = payload['score']\n+ score = payload.get('score', None)\n magnitude = payload['magnitude']\n return cls(score, magnitude)\n", "issue": "Error in NL API, 'Sentiment' object has no attribute 'score'\nUpon executing the following code on GCE & locally I'm getting 'score' not found error locally while it works on GCE.\r\n\r\n```\r\ndef analyze_text(text):\r\n language_client = language.Client()\r\n document = language_client.document_from_text(text)\r\n annotations = document.annotate_text(include_sentiment=True)\r\n print(annotations.sentiment.score)\r\n print (annotations.sentiment.magnitude)\r\n```\r\n\r\nOnly difference I can find is on GCE, the gcloud version is different than the local\r\n**GCE gcloud**\r\ngcloud --version\r\nGoogle Cloud SDK 145.0.0\r\nalpha 2017.02.21\r\napp-engine-python 1.9.50\r\nbeta 2017.02.21\r\nbq 2.0.24\r\nbq-nix 2.0.24\r\ncore 2017.02.21\r\ncore-nix 2017.02.21\r\ngcloud \r\ngcloud-deps 2017.02.21\r\ngcloud-deps-linux-x86_64 2017.02.21\r\ngsutil 4.22\r\ngsutil-nix 4.22\r\n\r\n**Local gcloud**\r\ngcloud --version\r\nGoogle Cloud SDK 148.0.1\r\nbq 2.0.24\r\nbq-nix 2.0.24\r\ncore 2017.03.24\r\ncore-nix 2016.11.07\r\ngcloud \r\ngcloud-deps 2017.03.17\r\ngcloud-deps-darwin-x86_64 2017.02.21\r\ngsutil 4.23\r\ngsutil-nix 4.19\r\n\r\nDebugging locally, the response from NL API consist of polarity property and not score.\r\n\r\n<img width=\"761\" alt=\"image\" src=\"https://cloud.githubusercontent.com/assets/1888535/24331449/62cbdbee-1252-11e7-9d29-8699df078fcf.png\">\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Definition for Google Cloud Natural Language API sentiment.\n\nSentiment is the response to an ``analyzeSentiment`` request.\n\"\"\"\n\n\nclass Sentiment(object):\n \"\"\"A Google Cloud Natural Language API sentiment object.\n\n .. _Sentiment message: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Sentiment\n .. 
_Sentiment basics: https://cloud.google.com/natural-language/\\\n docs/basics#sentiment-analysis-values\n\n See `Sentiment message`_ and `Sentiment basics`_.\n\n :type score: float\n :param score: Score of the sentiment in the ``[-1.0, 1.0]`` range.\n Larger numbers represent more positive sentiments.\n\n :type magnitude: float\n :param magnitude: A non-negative number in the ``[0, +inf)`` range, which\n represents the absolute magnitude of sentiment\n regardless of score (positive or negative).\n \"\"\"\n\n def __init__(self, score, magnitude):\n self.score = score\n self.magnitude = magnitude\n\n @classmethod\n def from_api_repr(cls, payload):\n \"\"\"Convert a Sentiment from the JSON API into a :class:`Sentiment`.\n\n :param payload: dict\n :type payload: The value from the backend.\n\n :rtype: :class:`Sentiment`\n :returns: The sentiment parsed from the API representation.\n \"\"\"\n score = payload['score']\n magnitude = payload['magnitude']\n return cls(score, magnitude)\n", "path": "language/google/cloud/language/sentiment.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Definition for Google Cloud Natural Language API sentiment.\n\nSentiment is the response to an ``analyzeSentiment`` request.\n\"\"\"\n\n\nclass Sentiment(object):\n \"\"\"A Google Cloud Natural Language API sentiment object.\n\n .. _Sentiment message: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Sentiment\n .. _Sentiment basics: https://cloud.google.com/natural-language/\\\n docs/basics#sentiment-analysis-values\n\n See `Sentiment message`_ and `Sentiment basics`_.\n\n :type score: float\n :param score: Score of the sentiment in the ``[-1.0, 1.0]`` range.\n Larger numbers represent more positive sentiments.\n\n :type magnitude: float\n :param magnitude: A non-negative number in the ``[0, +inf)`` range, which\n represents the absolute magnitude of sentiment\n regardless of score (positive or negative).\n \"\"\"\n\n def __init__(self, score, magnitude):\n self.score = score\n self.magnitude = magnitude\n\n @classmethod\n def from_api_repr(cls, payload):\n \"\"\"Convert a Sentiment from the JSON API into a :class:`Sentiment`.\n\n :param payload: dict\n :type payload: The value from the backend.\n\n :rtype: :class:`Sentiment`\n :returns: The sentiment parsed from the API representation.\n \"\"\"\n score = payload.get('score', None)\n magnitude = payload['magnitude']\n return cls(score, magnitude)\n", "path": "language/google/cloud/language/sentiment.py"}]} | 1,335 | 113 |
gh_patches_debug_7619 | rasdani/github-patches | git_diff | pyca__cryptography-2682 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Any interest for an override parameter for _MAX_CLOCK_SKEW?
It seems like a narrow assumption to assume that the actors communicating via Fernet-encrypted messages will always be able to have somewhat synchronized clocks. In our case, we have no way to amend the clocks of the systems upon which our software runs and they often differ by at least minutes, if not hours or days.
Our current solution is to just stomp on `cryptography.fernet._MAX_CLOCK_SKEW` and set it to some `LARGE_VALUE`. Not ideal, but it gets the job done for now.
I can think of several options to improve the situation here, but most of them involve changes to this library's code.
1) Add an optional parameter to Fernet's `__init__()` function or `decrypt()` function to disable the clock skew check altogether.
2) Add an optional parameter to Fernet's `__init__()` function or `decrypt()` that defaults to `_MAX_CLOCK_SKEW` and use that value for the clock skew check.
3) Add ability to set an offset on the Fernet object (or pass it in to `decrypt()`) that would be applied to the extracted timestamp prior to being checked against `_MAX_CLOCK_SKEW`. It would be the library user's responsibility for determining the appropriate offset value for each external actor.
4) I could subclass Fernet and implement some appropriate behavior in my new class (would essentially involve copying the whole `decrypt()` function into my codebase just to tweak this specific portion's behavior)
Would any of options 1-3 be worth pursuing to the point of a pull-request? I can probably make a pretty good effort on each of them, but I don't want to spend the time if the maintainers don't think any are worth attempting.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/fernet.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import base64
8 import binascii
9 import os
10 import struct
11 import time
12
13 import six
14
15 from cryptography.exceptions import InvalidSignature
16 from cryptography.hazmat.backends import default_backend
17 from cryptography.hazmat.primitives import hashes, padding
18 from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
19 from cryptography.hazmat.primitives.hmac import HMAC
20
21
22 class InvalidToken(Exception):
23 pass
24
25
26 _MAX_CLOCK_SKEW = 60
27
28
29 class Fernet(object):
30 def __init__(self, key, backend=None):
31 if backend is None:
32 backend = default_backend()
33
34 key = base64.urlsafe_b64decode(key)
35 if len(key) != 32:
36 raise ValueError(
37 "Fernet key must be 32 url-safe base64-encoded bytes."
38 )
39
40 self._signing_key = key[:16]
41 self._encryption_key = key[16:]
42 self._backend = backend
43
44 @classmethod
45 def generate_key(cls):
46 return base64.urlsafe_b64encode(os.urandom(32))
47
48 def encrypt(self, data):
49 current_time = int(time.time())
50 iv = os.urandom(16)
51 return self._encrypt_from_parts(data, current_time, iv)
52
53 def _encrypt_from_parts(self, data, current_time, iv):
54 if not isinstance(data, bytes):
55 raise TypeError("data must be bytes.")
56
57 padder = padding.PKCS7(algorithms.AES.block_size).padder()
58 padded_data = padder.update(data) + padder.finalize()
59 encryptor = Cipher(
60 algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend
61 ).encryptor()
62 ciphertext = encryptor.update(padded_data) + encryptor.finalize()
63
64 basic_parts = (
65 b"\x80" + struct.pack(">Q", current_time) + iv + ciphertext
66 )
67
68 h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)
69 h.update(basic_parts)
70 hmac = h.finalize()
71 return base64.urlsafe_b64encode(basic_parts + hmac)
72
73 def decrypt(self, token, ttl=None):
74 if not isinstance(token, bytes):
75 raise TypeError("token must be bytes.")
76
77 current_time = int(time.time())
78
79 try:
80 data = base64.urlsafe_b64decode(token)
81 except (TypeError, binascii.Error):
82 raise InvalidToken
83
84 if not data or six.indexbytes(data, 0) != 0x80:
85 raise InvalidToken
86
87 try:
88 timestamp, = struct.unpack(">Q", data[1:9])
89 except struct.error:
90 raise InvalidToken
91 if ttl is not None:
92 if timestamp + ttl < current_time:
93 raise InvalidToken
94 if current_time + _MAX_CLOCK_SKEW < timestamp:
95 raise InvalidToken
96 h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)
97 h.update(data[:-32])
98 try:
99 h.verify(data[-32:])
100 except InvalidSignature:
101 raise InvalidToken
102
103 iv = data[9:25]
104 ciphertext = data[25:-32]
105 decryptor = Cipher(
106 algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend
107 ).decryptor()
108 plaintext_padded = decryptor.update(ciphertext)
109 try:
110 plaintext_padded += decryptor.finalize()
111 except ValueError:
112 raise InvalidToken
113 unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
114
115 unpadded = unpadder.update(plaintext_padded)
116 try:
117 unpadded += unpadder.finalize()
118 except ValueError:
119 raise InvalidToken
120 return unpadded
121
122
123 class MultiFernet(object):
124 def __init__(self, fernets):
125 fernets = list(fernets)
126 if not fernets:
127 raise ValueError(
128 "MultiFernet requires at least one Fernet instance"
129 )
130 self._fernets = fernets
131
132 def encrypt(self, msg):
133 return self._fernets[0].encrypt(msg)
134
135 def decrypt(self, msg, ttl=None):
136 for f in self._fernets:
137 try:
138 return f.decrypt(msg, ttl)
139 except InvalidToken:
140 pass
141 raise InvalidToken
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/fernet.py b/src/cryptography/fernet.py
--- a/src/cryptography/fernet.py
+++ b/src/cryptography/fernet.py
@@ -91,8 +91,10 @@
if ttl is not None:
if timestamp + ttl < current_time:
raise InvalidToken
- if current_time + _MAX_CLOCK_SKEW < timestamp:
- raise InvalidToken
+
+ if current_time + _MAX_CLOCK_SKEW < timestamp:
+ raise InvalidToken
+
h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)
h.update(data[:-32])
try:
| {"golden_diff": "diff --git a/src/cryptography/fernet.py b/src/cryptography/fernet.py\n--- a/src/cryptography/fernet.py\n+++ b/src/cryptography/fernet.py\n@@ -91,8 +91,10 @@\n if ttl is not None:\n if timestamp + ttl < current_time:\n raise InvalidToken\n- if current_time + _MAX_CLOCK_SKEW < timestamp:\n- raise InvalidToken\n+\n+ if current_time + _MAX_CLOCK_SKEW < timestamp:\n+ raise InvalidToken\n+\n h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(data[:-32])\n try:\n", "issue": "Any interest for an override parameter for _MAX_CLOCK_SKEW?\nIt seems like a narrow assumption to assume that the actors communicating via Fernet-encrypted messages will always be able to have somewhat synchronized clocks. In our case, we have no way to amend the clocks of the systems upon which our software runs and they often differ by at least minutes, if not hours or days.\n\nOur current solution is to just stomp on `cryptography.fernet._MAX_CLOCK_SKEW` and set it to some `LARGE_VALUE`. Not ideal, but it gets the job done for now.\n\nI can think of several options to improve the situation here, but most of them involve changes to this library's code.\n\n1) Add an optional parameter to Fernet's `__init__()` function or `decrypt()` function to disable the clock skew check altogether.\n2) Add an optional parameter to Fernet's `__init__()` function or `decrypt()` that defaults to `_MAX_CLOCK_SKEW` and use that value for the clock skew check.\n3) Add ability to set an offset on the Fernet object (or pass it in to `decrypt()`) that would be applied to the extracted timestamp prior to being checked against `_MAX_CLOCK_SKEW`. It would be the library user's responsibility for determining the appropriate offset value for each external actor.\n4) I could subclass Fernet and implement some appropriate behavior in my new class (would essentially involve copying the whole `decrypt()` function into my codebase just to tweak this specific portion's behavior)\n\nWould any of options 1-3 be worth pursuing to the point of a pull-request? I can probably make a pretty good effort on each of them, but I don't want to spend the time if the maintainers don't think any are worth attempting.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport base64\nimport binascii\nimport os\nimport struct\nimport time\n\nimport six\n\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes, padding\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives.hmac import HMAC\n\n\nclass InvalidToken(Exception):\n pass\n\n\n_MAX_CLOCK_SKEW = 60\n\n\nclass Fernet(object):\n def __init__(self, key, backend=None):\n if backend is None:\n backend = default_backend()\n\n key = base64.urlsafe_b64decode(key)\n if len(key) != 32:\n raise ValueError(\n \"Fernet key must be 32 url-safe base64-encoded bytes.\"\n )\n\n self._signing_key = key[:16]\n self._encryption_key = key[16:]\n self._backend = backend\n\n @classmethod\n def generate_key(cls):\n return base64.urlsafe_b64encode(os.urandom(32))\n\n def encrypt(self, data):\n current_time = int(time.time())\n iv = os.urandom(16)\n return self._encrypt_from_parts(data, current_time, iv)\n\n def _encrypt_from_parts(self, data, current_time, iv):\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes.\")\n\n padder = padding.PKCS7(algorithms.AES.block_size).padder()\n padded_data = padder.update(data) + padder.finalize()\n encryptor = Cipher(\n algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend\n ).encryptor()\n ciphertext = encryptor.update(padded_data) + encryptor.finalize()\n\n basic_parts = (\n b\"\\x80\" + struct.pack(\">Q\", current_time) + iv + ciphertext\n )\n\n h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(basic_parts)\n hmac = h.finalize()\n return base64.urlsafe_b64encode(basic_parts + hmac)\n\n def decrypt(self, token, ttl=None):\n if not isinstance(token, bytes):\n raise TypeError(\"token must be bytes.\")\n\n current_time = int(time.time())\n\n try:\n data = base64.urlsafe_b64decode(token)\n except (TypeError, binascii.Error):\n raise InvalidToken\n\n if not data or six.indexbytes(data, 0) != 0x80:\n raise InvalidToken\n\n try:\n timestamp, = struct.unpack(\">Q\", data[1:9])\n except struct.error:\n raise InvalidToken\n if ttl is not None:\n if timestamp + ttl < current_time:\n raise InvalidToken\n if current_time + _MAX_CLOCK_SKEW < timestamp:\n raise InvalidToken\n h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(data[:-32])\n try:\n h.verify(data[-32:])\n except InvalidSignature:\n raise InvalidToken\n\n iv = data[9:25]\n ciphertext = data[25:-32]\n decryptor = Cipher(\n algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend\n ).decryptor()\n plaintext_padded = decryptor.update(ciphertext)\n try:\n plaintext_padded += decryptor.finalize()\n except ValueError:\n raise InvalidToken\n unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()\n\n unpadded = unpadder.update(plaintext_padded)\n try:\n unpadded += unpadder.finalize()\n except ValueError:\n raise InvalidToken\n return unpadded\n\n\nclass MultiFernet(object):\n def __init__(self, fernets):\n fernets = list(fernets)\n if not fernets:\n raise ValueError(\n \"MultiFernet requires at least one Fernet instance\"\n )\n self._fernets = fernets\n\n def encrypt(self, msg):\n return self._fernets[0].encrypt(msg)\n\n def decrypt(self, msg, ttl=None):\n for f in self._fernets:\n try:\n return f.decrypt(msg, ttl)\n except 
InvalidToken:\n pass\n raise InvalidToken\n", "path": "src/cryptography/fernet.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport base64\nimport binascii\nimport os\nimport struct\nimport time\n\nimport six\n\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes, padding\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives.hmac import HMAC\n\n\nclass InvalidToken(Exception):\n pass\n\n\n_MAX_CLOCK_SKEW = 60\n\n\nclass Fernet(object):\n def __init__(self, key, backend=None):\n if backend is None:\n backend = default_backend()\n\n key = base64.urlsafe_b64decode(key)\n if len(key) != 32:\n raise ValueError(\n \"Fernet key must be 32 url-safe base64-encoded bytes.\"\n )\n\n self._signing_key = key[:16]\n self._encryption_key = key[16:]\n self._backend = backend\n\n @classmethod\n def generate_key(cls):\n return base64.urlsafe_b64encode(os.urandom(32))\n\n def encrypt(self, data):\n current_time = int(time.time())\n iv = os.urandom(16)\n return self._encrypt_from_parts(data, current_time, iv)\n\n def _encrypt_from_parts(self, data, current_time, iv):\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes.\")\n\n padder = padding.PKCS7(algorithms.AES.block_size).padder()\n padded_data = padder.update(data) + padder.finalize()\n encryptor = Cipher(\n algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend\n ).encryptor()\n ciphertext = encryptor.update(padded_data) + encryptor.finalize()\n\n basic_parts = (\n b\"\\x80\" + struct.pack(\">Q\", current_time) + iv + ciphertext\n )\n\n h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(basic_parts)\n hmac = h.finalize()\n return base64.urlsafe_b64encode(basic_parts + hmac)\n\n def decrypt(self, token, ttl=None):\n if not isinstance(token, bytes):\n raise TypeError(\"token must be bytes.\")\n\n current_time = int(time.time())\n\n try:\n data = base64.urlsafe_b64decode(token)\n except (TypeError, binascii.Error):\n raise InvalidToken\n\n if not data or six.indexbytes(data, 0) != 0x80:\n raise InvalidToken\n\n try:\n timestamp, = struct.unpack(\">Q\", data[1:9])\n except struct.error:\n raise InvalidToken\n if ttl is not None:\n if timestamp + ttl < current_time:\n raise InvalidToken\n\n if current_time + _MAX_CLOCK_SKEW < timestamp:\n raise InvalidToken\n\n h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(data[:-32])\n try:\n h.verify(data[-32:])\n except InvalidSignature:\n raise InvalidToken\n\n iv = data[9:25]\n ciphertext = data[25:-32]\n decryptor = Cipher(\n algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend\n ).decryptor()\n plaintext_padded = decryptor.update(ciphertext)\n try:\n plaintext_padded += decryptor.finalize()\n except ValueError:\n raise InvalidToken\n unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()\n\n unpadded = unpadder.update(plaintext_padded)\n try:\n unpadded += unpadder.finalize()\n except ValueError:\n raise InvalidToken\n return unpadded\n\n\nclass MultiFernet(object):\n def __init__(self, fernets):\n fernets = list(fernets)\n if not fernets:\n raise ValueError(\n \"MultiFernet requires at least one 
Fernet instance\"\n )\n self._fernets = fernets\n\n def encrypt(self, msg):\n return self._fernets[0].encrypt(msg)\n\n def decrypt(self, msg, ttl=None):\n for f in self._fernets:\n try:\n return f.decrypt(msg, ttl)\n except InvalidToken:\n pass\n raise InvalidToken\n", "path": "src/cryptography/fernet.py"}]} | 1,968 | 151 |
gh_patches_debug_7377 | rasdani/github-patches | git_diff | tobymao__sqlglot-1951 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
error in optimizer.qualify with mixed identifier quotation.
**Fully reproducible code snippet**:
```
from sqlglot import parse_one
from sqlglot.dialects.oracle import Oracle
from sqlglot.lineage import lineage
from sqlglot.optimizer.qualify import qualify
parser = Oracle()
sql = """
with base as
(
select x.dummy as COL_1
from dual x
)
select b."COL_1"
from base b
"""
ast = parse_one(sql, read=parser)
qualified = qualify(
ast,
dialect=parser,
schema=None,
)
```
error:
```
Exception has occurred: OptimizeError
Unknown column: COL_1
File "C:\Users\itay5\Desktop\work\sql\main.py", line 20, in <module>
qualified = qualify(
^^^^^^^^
sqlglot.errors.OptimizeError: Unknown column: COL_1
```
I get the same error while trying to use identify=False, and still no luck.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/dialects/oracle.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, generator, parser, tokens, transforms
6 from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
7 from sqlglot.helper import seq_get
8 from sqlglot.tokens import TokenType
9
10
11 def _parse_xml_table(self: parser.Parser) -> exp.XMLTable:
12 this = self._parse_string()
13
14 passing = None
15 columns = None
16
17 if self._match_text_seq("PASSING"):
18 # The BY VALUE keywords are optional and are provided for semantic clarity
19 self._match_text_seq("BY", "VALUE")
20 passing = self._parse_csv(self._parse_column)
21
22 by_ref = self._match_text_seq("RETURNING", "SEQUENCE", "BY", "REF")
23
24 if self._match_text_seq("COLUMNS"):
25 columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))
26
27 return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)
28
29
30 class Oracle(Dialect):
31 ALIAS_POST_TABLESAMPLE = True
32
33 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
34 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
35 TIME_MAPPING = {
36 "AM": "%p", # Meridian indicator with or without periods
37 "A.M.": "%p", # Meridian indicator with or without periods
38 "PM": "%p", # Meridian indicator with or without periods
39 "P.M.": "%p", # Meridian indicator with or without periods
40 "D": "%u", # Day of week (1-7)
41 "DAY": "%A", # name of day
42 "DD": "%d", # day of month (1-31)
43 "DDD": "%j", # day of year (1-366)
44 "DY": "%a", # abbreviated name of day
45 "HH": "%I", # Hour of day (1-12)
46 "HH12": "%I", # alias for HH
47 "HH24": "%H", # Hour of day (0-23)
48 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
49 "MI": "%M", # Minute (0-59)
50 "MM": "%m", # Month (01-12; January = 01)
51 "MON": "%b", # Abbreviated name of month
52 "MONTH": "%B", # Name of month
53 "SS": "%S", # Second (0-59)
54 "WW": "%W", # Week of year (1-53)
55 "YY": "%y", # 15
56 "YYYY": "%Y", # 2015
57 }
58
59 class Parser(parser.Parser):
60 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
61
62 FUNCTIONS = {
63 **parser.Parser.FUNCTIONS,
64 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
65 }
66
67 FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
68 **parser.Parser.FUNCTION_PARSERS,
69 "XMLTABLE": _parse_xml_table,
70 }
71
72 TYPE_LITERAL_PARSERS = {
73 exp.DataType.Type.DATE: lambda self, this, _: self.expression(
74 exp.DateStrToDate, this=this
75 )
76 }
77
78 def _parse_column(self) -> t.Optional[exp.Expression]:
79 column = super()._parse_column()
80 if column:
81 column.set("join_mark", self._match(TokenType.JOIN_MARKER))
82 return column
83
84 def _parse_hint(self) -> t.Optional[exp.Hint]:
85 if self._match(TokenType.HINT):
86 start = self._curr
87 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
88 self._advance()
89
90 if not self._curr:
91 self.raise_error("Expected */ after HINT")
92
93 end = self._tokens[self._index - 3]
94 return exp.Hint(expressions=[self._find_sql(start, end)])
95
96 return None
97
98 class Generator(generator.Generator):
99 LOCKING_READS_SUPPORTED = True
100 JOIN_HINTS = False
101 TABLE_HINTS = False
102 COLUMN_JOIN_MARKS_SUPPORTED = True
103
104 LIMIT_FETCH = "FETCH"
105
106 TYPE_MAPPING = {
107 **generator.Generator.TYPE_MAPPING,
108 exp.DataType.Type.TINYINT: "NUMBER",
109 exp.DataType.Type.SMALLINT: "NUMBER",
110 exp.DataType.Type.INT: "NUMBER",
111 exp.DataType.Type.BIGINT: "NUMBER",
112 exp.DataType.Type.DECIMAL: "NUMBER",
113 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
114 exp.DataType.Type.VARCHAR: "VARCHAR2",
115 exp.DataType.Type.NVARCHAR: "NVARCHAR2",
116 exp.DataType.Type.NCHAR: "NCHAR",
117 exp.DataType.Type.TEXT: "CLOB",
118 exp.DataType.Type.BINARY: "BLOB",
119 exp.DataType.Type.VARBINARY: "BLOB",
120 }
121
122 TRANSFORMS = {
123 **generator.Generator.TRANSFORMS,
124 exp.DateStrToDate: lambda self, e: self.func(
125 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
126 ),
127 exp.Group: transforms.preprocess([transforms.unalias_group]),
128 exp.ILike: no_ilike_sql,
129 exp.Coalesce: rename_func("NVL"),
130 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
131 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
132 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
133 exp.Substring: rename_func("SUBSTR"),
134 exp.Table: lambda self, e: self.table_sql(e, sep=" "),
135 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
136 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
137 exp.ToChar: lambda self, e: self.function_fallback_sql(e),
138 exp.Trim: trim_sql,
139 exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
140 }
141
142 PROPERTIES_LOCATION = {
143 **generator.Generator.PROPERTIES_LOCATION,
144 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
145 }
146
147 def offset_sql(self, expression: exp.Offset) -> str:
148 return f"{super().offset_sql(expression)} ROWS"
149
150 def xmltable_sql(self, expression: exp.XMLTable) -> str:
151 this = self.sql(expression, "this")
152 passing = self.expressions(expression, key="passing")
153 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
154 columns = self.expressions(expression, key="columns")
155 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
156 by_ref = (
157 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
158 )
159 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
160
161 class Tokenizer(tokens.Tokenizer):
162 VAR_SINGLE_TOKENS = {"@"}
163
164 KEYWORDS = {
165 **tokens.Tokenizer.KEYWORDS,
166 "(+)": TokenType.JOIN_MARKER,
167 "BINARY_DOUBLE": TokenType.DOUBLE,
168 "BINARY_FLOAT": TokenType.FLOAT,
169 "COLUMNS": TokenType.COLUMN,
170 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
171 "MINUS": TokenType.EXCEPT,
172 "NVARCHAR2": TokenType.NVARCHAR,
173 "SAMPLE": TokenType.TABLE_SAMPLE,
174 "START": TokenType.BEGIN,
175 "TOP": TokenType.TOP,
176 "VARCHAR2": TokenType.VARCHAR,
177 }
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -30,6 +30,9 @@
class Oracle(Dialect):
ALIAS_POST_TABLESAMPLE = True
+ # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm
+ RESOLVES_IDENTIFIERS_AS_UPPERCASE = True
+
# https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
# https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
TIME_MAPPING = {
| {"golden_diff": "diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py\n--- a/sqlglot/dialects/oracle.py\n+++ b/sqlglot/dialects/oracle.py\n@@ -30,6 +30,9 @@\n class Oracle(Dialect):\n ALIAS_POST_TABLESAMPLE = True\n \n+ # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm\n+ RESOLVES_IDENTIFIERS_AS_UPPERCASE = True\n+\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n TIME_MAPPING = {\n", "issue": "error in optimizer.qualify with mixed identifier quotation.\n**Fully reproducible code snippet**:\r\n```\r\nfrom sqlglot import parse_one\r\nfrom sqlglot.dialects.oracle import Oracle\r\nfrom sqlglot.lineage import lineage\r\nfrom sqlglot.optimizer.qualify import qualify\r\n\r\nparser = Oracle()\r\n\r\nsql = \"\"\"\r\nwith base as\r\n(\r\n\r\n select x.dummy as COL_1\r\n from dual x\r\n)\r\nselect b.\"COL_1\"\r\nfrom base b\r\n\"\"\"\r\n\r\nast = parse_one(sql, read=parser)\r\nqualified = qualify(\r\n ast,\r\n dialect=parser,\r\n schema=None,\r\n )\r\n\r\n```\r\nerror:\r\n```\r\nException has occurred: OptimizeError\r\nUnknown column: COL_1\r\n File \"C:\\Users\\itay5\\Desktop\\work\\sql\\main.py\", line 20, in <module>\r\n qualified = qualify(\r\n ^^^^^^^^\r\nsqlglot.errors.OptimizeError: Unknown column: COL_1\r\n```\r\nI get the same error while trying to use identify=False, and still no luck.\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _parse_xml_table(self: parser.Parser) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(self._parse_column)\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))\n\n return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)\n\n\nclass Oracle(Dialect):\n ALIAS_POST_TABLESAMPLE = True\n\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n TIME_MAPPING = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n \"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # 
Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n \"XMLTABLE\": _parse_xml_table,\n }\n\n TYPE_LITERAL_PARSERS = {\n exp.DataType.Type.DATE: lambda self, this, _: self.expression(\n exp.DateStrToDate, this=this\n )\n }\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n def _parse_hint(self) -> t.Optional[exp.Hint]:\n if self._match(TokenType.HINT):\n start = self._curr\n while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n self._advance()\n\n if not self._curr:\n self.raise_error(\"Expected */ after HINT\")\n\n end = self._tokens[self._index - 3]\n return exp.Hint(expressions=[self._find_sql(start, end)])\n\n return None\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n JOIN_HINTS = False\n TABLE_HINTS = False\n COLUMN_JOIN_MARKS_SUPPORTED = True\n\n LIMIT_FETCH = \"FETCH\"\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.NCHAR: \"NCHAR\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.DateStrToDate: lambda self, e: self.func(\n \"TO_DATE\", e.this, exp.Literal.string(\"YYYY-MM-DD\")\n ),\n exp.Group: transforms.preprocess([transforms.unalias_group]),\n exp.ILike: no_ilike_sql,\n exp.Coalesce: rename_func(\"NVL\"),\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: trim_sql,\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, \"this\")\n passing = self.expressions(expression, key=\"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, key=\"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n 
f\"{self.sep()}RETURNING SEQUENCE BY REF\" if expression.args.get(\"by_ref\") else \"\"\n )\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n class Tokenizer(tokens.Tokenizer):\n VAR_SINGLE_TOKENS = {\"@\"}\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n \"BINARY_DOUBLE\": TokenType.DOUBLE,\n \"BINARY_FLOAT\": TokenType.FLOAT,\n \"COLUMNS\": TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"SAMPLE\": TokenType.TABLE_SAMPLE,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _parse_xml_table(self: parser.Parser) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(self._parse_column)\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))\n\n return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)\n\n\nclass Oracle(Dialect):\n ALIAS_POST_TABLESAMPLE = True\n\n # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm\n RESOLVES_IDENTIFIERS_AS_UPPERCASE = True\n\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n TIME_MAPPING = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n \"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n \"XMLTABLE\": _parse_xml_table,\n }\n\n TYPE_LITERAL_PARSERS = {\n exp.DataType.Type.DATE: lambda self, this, _: self.expression(\n exp.DateStrToDate, this=this\n 
)\n }\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n def _parse_hint(self) -> t.Optional[exp.Hint]:\n if self._match(TokenType.HINT):\n start = self._curr\n while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n self._advance()\n\n if not self._curr:\n self.raise_error(\"Expected */ after HINT\")\n\n end = self._tokens[self._index - 3]\n return exp.Hint(expressions=[self._find_sql(start, end)])\n\n return None\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n JOIN_HINTS = False\n TABLE_HINTS = False\n COLUMN_JOIN_MARKS_SUPPORTED = True\n\n LIMIT_FETCH = \"FETCH\"\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.NCHAR: \"NCHAR\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.DateStrToDate: lambda self, e: self.func(\n \"TO_DATE\", e.this, exp.Literal.string(\"YYYY-MM-DD\")\n ),\n exp.Group: transforms.preprocess([transforms.unalias_group]),\n exp.ILike: no_ilike_sql,\n exp.Coalesce: rename_func(\"NVL\"),\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: trim_sql,\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, \"this\")\n passing = self.expressions(expression, key=\"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, key=\"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n f\"{self.sep()}RETURNING SEQUENCE BY REF\" if expression.args.get(\"by_ref\") else \"\"\n )\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n class Tokenizer(tokens.Tokenizer):\n VAR_SINGLE_TOKENS = {\"@\"}\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n \"BINARY_DOUBLE\": TokenType.DOUBLE,\n \"BINARY_FLOAT\": TokenType.FLOAT,\n \"COLUMNS\": TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"SAMPLE\": 
TokenType.TABLE_SAMPLE,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}]} | 2,756 | 187 |
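Editorial aside, not part of the dataset row above or below: the Oracle `Generator.TYPE_MAPPING` and tokenizer keywords visible in the patched `sqlglot/dialects/oracle.py` are easiest to sanity-check through `sqlglot.transpile`. The sketch below is a hypothetical illustration; the query is made up and the exact output string may differ between sqlglot versions.

```python
import sqlglot

# Generic SQL in, Oracle SQL out; TYPE_MAPPING rewrites TEXT -> CLOB and
# VARCHAR -> VARCHAR2, as listed in the Generator class above.
sql = "SELECT CAST(name AS TEXT), CAST(code AS VARCHAR(10)) FROM t"
print(sqlglot.transpile(sql, write="oracle")[0])
# Approximately: SELECT CAST(name AS CLOB), CAST(code AS VARCHAR2(10)) FROM t
```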
gh_patches_debug_35745 | rasdani/github-patches | git_diff | uclapi__uclapi-977 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Webhooks
They haven't been delivered for a _long_ time now, so we finally need to dig deep and see what is going wrong. The room data is definitely up to date, however.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/uclapi/roombookings/management/commands/trigger_webhooks.py`
Content:
```
1 from django.core.management.base import BaseCommand
2 from roombookings.models import Lock, BookingA, BookingB
3 from roombookings.helpers import _serialize_bookings
4 from dashboard.models import Webhook, WebhookTriggerHistory
5 from datetime import datetime
6 from deepdiff import DeepDiff
7 import grequests
8 from django.utils import timezone
9
10
11 class Command(BaseCommand):
12
13 help = 'Diff roombooking result sets and notify relevant webhooks'
14
15 def handle(self, *args, **options):
16 self.stdout.write("Triggering webhooks")
17
18 # currently locked table is the old one, more recent one is not locked
19 lock = Lock.objects.all()[0] # there is only ever one lock
20
21 if lock.bookingA:
22 old_booking_table = BookingA
23 new_booking_table = BookingB
24 else:
25 old_booking_table = BookingB
26 new_booking_table = BookingA
27
28 now = datetime.now()
29
30 old_bookings = _serialize_bookings(
31 old_booking_table.objects.filter(
32 startdatetime__gt=now
33 )
34 )
35 new_bookings = _serialize_bookings(
36 new_booking_table.objects.filter(
37 startdatetime__gt=now
38 )
39 )
40
41 ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)
42
43 webhooks = Webhook.objects.all()
44 # assumption: list of webhooks will be longer than ddiff
45
46 num_bookings_added = 0
47 num_bookings_removed = 0
48 if "iterable_item_added" in ddiff:
49 num_bookings_added = len(
50 ddiff["iterable_item_added"].values()
51 )
52
53 if "iterable_item_removed" in ddiff:
54 num_bookings_removed = len(
55 ddiff["iterable_item_removed"].values()
56 )
57
58 self.stdout.write(
59 "{} bookings added\n{} bookings removed.".format(
60 num_bookings_added,
61 num_bookings_removed
62 )
63 )
64
65 def webhook_map(webhook):
66 def webhook_filter(booking):
67 return (
68 (
69 webhook.siteid == '' or
70 booking["siteid"] == webhook.siteid
71 ) and
72 (
73 webhook.roomid == '' or
74 booking["roomid"] == webhook.roomid
75 ) and
76 (
77 webhook.contact == '' or
78 # mimick SQL 'like'
79 webhook.contact in str(booking["contact"])
80 )
81 )
82 output = {
83 "webhook_in_db": webhook,
84 "url": webhook.url,
85 "verification_secret": webhook.verification_secret
86 }
87 if "iterable_item_added" in ddiff:
88 bookings_added = list(filter(
89 webhook_filter, ddiff["iterable_item_added"].values()
90 ))
91 if bookings_added != []:
92 output["bookings_added"] = bookings_added
93 if "iterable_item_removed" in ddiff:
94 bookings_removed = list(filter(
95 webhook_filter, ddiff["iterable_item_removed"].values()
96 ))
97 if bookings_removed != []:
98 output["bookings_removed"] = bookings_removed
99
100 return output
101
102 webhooks_to_enact = list(map(webhook_map, webhooks))
103
104 unsent_requests = []
105 for idx, webhook in enumerate(webhooks_to_enact):
106 payload = {
107 "service": "roombookings",
108 "name": "bookings_changed",
109 "verification_secret": webhook["verification_secret"],
110 "content": {}
111 }
112
113 if "bookings_added" in webhook:
114 payload["content"]["bookings_added"] = (
115 webhook["bookings_added"]
116 )
117 if "bookings_removed" in webhook:
118 payload["content"]["bookings_removed"] = (
119 webhook["bookings_removed"]
120 )
121
122 webhooks_to_enact[idx]["payload"] = payload
123
124 if payload["content"] != {}:
125 unsent_requests.append(
126 grequests.post(
127 webhook["url"], json=payload, headers={
128 "User-Agent": "uclapi-bot/1"
129 }
130 )
131 )
132 self.stdout.write(
133 "Triggering {} webhooks.".format(len(unsent_requests))
134 )
135 grequests.map(unsent_requests)
136
137 for webhook in webhooks_to_enact:
138 if webhook["payload"]["content"] != {}:
139 webhook_in_db = webhook["webhook_in_db"]
140 webhook_in_db.last_fired = timezone.now()
141 webhook_in_db.save()
142
143 new_webhook_history_entry = WebhookTriggerHistory(
144 webhook=webhook_in_db,
145 payload=webhook["payload"]
146 )
147 new_webhook_history_entry.save()
148
149 self.stdout.write("Webhooks triggered.")
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
@@ -4,16 +4,25 @@
from dashboard.models import Webhook, WebhookTriggerHistory
from datetime import datetime
from deepdiff import DeepDiff
-import grequests
from django.utils import timezone
+from requests_futures.sessions import FuturesSession
class Command(BaseCommand):
help = 'Diff roombooking result sets and notify relevant webhooks'
+ def add_arguments(self, parser):
+ parser.add_argument(
+ '--debug',
+ action='store_true',
+ dest='debug',
+ help='Print webhook responses',
+ )
+
def handle(self, *args, **options):
self.stdout.write("Triggering webhooks")
+ session = FuturesSession()
# currently locked table is the old one, more recent one is not locked
lock = Lock.objects.all()[0] # there is only ever one lock
@@ -121,10 +130,11 @@
webhooks_to_enact[idx]["payload"] = payload
- if payload["content"] != {}:
+ if payload["content"] != {} and webhook["url"] != "":
unsent_requests.append(
- grequests.post(
- webhook["url"], json=payload, headers={
+ session.post(
+ webhook["url"], json=payload,
+ headers={
"User-Agent": "uclapi-bot/1"
}
)
@@ -132,7 +142,11 @@
self.stdout.write(
"Triggering {} webhooks.".format(len(unsent_requests))
)
- grequests.map(unsent_requests)
+ if("debug" in options):
+ for i in unsent_requests:
+ self.stdout.write(
+ 'response status {0}'.format(i.result().status_code)
+ )
for webhook in webhooks_to_enact:
if webhook["payload"]["content"] != {}:
| {"golden_diff": "diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n@@ -4,16 +4,25 @@\n from dashboard.models import Webhook, WebhookTriggerHistory\n from datetime import datetime\n from deepdiff import DeepDiff\n-import grequests\n from django.utils import timezone\n+from requests_futures.sessions import FuturesSession\n \n \n class Command(BaseCommand):\n \n help = 'Diff roombooking result sets and notify relevant webhooks'\n \n+ def add_arguments(self, parser):\n+ parser.add_argument(\n+ '--debug',\n+ action='store_true',\n+ dest='debug',\n+ help='Print webhook responses',\n+ )\n+\n def handle(self, *args, **options):\n self.stdout.write(\"Triggering webhooks\")\n+ session = FuturesSession()\n \n # currently locked table is the old one, more recent one is not locked\n lock = Lock.objects.all()[0] # there is only ever one lock\n@@ -121,10 +130,11 @@\n \n webhooks_to_enact[idx][\"payload\"] = payload\n \n- if payload[\"content\"] != {}:\n+ if payload[\"content\"] != {} and webhook[\"url\"] != \"\":\n unsent_requests.append(\n- grequests.post(\n- webhook[\"url\"], json=payload, headers={\n+ session.post(\n+ webhook[\"url\"], json=payload,\n+ headers={\n \"User-Agent\": \"uclapi-bot/1\"\n }\n )\n@@ -132,7 +142,11 @@\n self.stdout.write(\n \"Triggering {} webhooks.\".format(len(unsent_requests))\n )\n- grequests.map(unsent_requests)\n+ if(\"debug\" in options):\n+ for i in unsent_requests:\n+ self.stdout.write(\n+ 'response status {0}'.format(i.result().status_code)\n+ )\n \n for webhook in webhooks_to_enact:\n if webhook[\"payload\"][\"content\"] != {}:\n", "issue": "Webhooks\nThey haven't been delivered for a _long_ time now, so we finally need to dig deep and see what is going wrong. 
The room data is definitely up to date, however.\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom roombookings.models import Lock, BookingA, BookingB\nfrom roombookings.helpers import _serialize_bookings\nfrom dashboard.models import Webhook, WebhookTriggerHistory\nfrom datetime import datetime\nfrom deepdiff import DeepDiff\nimport grequests\nfrom django.utils import timezone\n\n\nclass Command(BaseCommand):\n\n help = 'Diff roombooking result sets and notify relevant webhooks'\n\n def handle(self, *args, **options):\n self.stdout.write(\"Triggering webhooks\")\n\n # currently locked table is the old one, more recent one is not locked\n lock = Lock.objects.all()[0] # there is only ever one lock\n\n if lock.bookingA:\n old_booking_table = BookingA\n new_booking_table = BookingB\n else:\n old_booking_table = BookingB\n new_booking_table = BookingA\n\n now = datetime.now()\n\n old_bookings = _serialize_bookings(\n old_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n new_bookings = _serialize_bookings(\n new_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n\n ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)\n\n webhooks = Webhook.objects.all()\n # assumption: list of webhooks will be longer than ddiff\n\n num_bookings_added = 0\n num_bookings_removed = 0\n if \"iterable_item_added\" in ddiff:\n num_bookings_added = len(\n ddiff[\"iterable_item_added\"].values()\n )\n\n if \"iterable_item_removed\" in ddiff:\n num_bookings_removed = len(\n ddiff[\"iterable_item_removed\"].values()\n )\n\n self.stdout.write(\n \"{} bookings added\\n{} bookings removed.\".format(\n num_bookings_added,\n num_bookings_removed\n )\n )\n\n def webhook_map(webhook):\n def webhook_filter(booking):\n return (\n (\n webhook.siteid == '' or\n booking[\"siteid\"] == webhook.siteid\n ) and\n (\n webhook.roomid == '' or\n booking[\"roomid\"] == webhook.roomid\n ) and\n (\n webhook.contact == '' or\n # mimick SQL 'like'\n webhook.contact in str(booking[\"contact\"])\n )\n )\n output = {\n \"webhook_in_db\": webhook,\n \"url\": webhook.url,\n \"verification_secret\": webhook.verification_secret\n }\n if \"iterable_item_added\" in ddiff:\n bookings_added = list(filter(\n webhook_filter, ddiff[\"iterable_item_added\"].values()\n ))\n if bookings_added != []:\n output[\"bookings_added\"] = bookings_added\n if \"iterable_item_removed\" in ddiff:\n bookings_removed = list(filter(\n webhook_filter, ddiff[\"iterable_item_removed\"].values()\n ))\n if bookings_removed != []:\n output[\"bookings_removed\"] = bookings_removed\n\n return output\n\n webhooks_to_enact = list(map(webhook_map, webhooks))\n\n unsent_requests = []\n for idx, webhook in enumerate(webhooks_to_enact):\n payload = {\n \"service\": \"roombookings\",\n \"name\": \"bookings_changed\",\n \"verification_secret\": webhook[\"verification_secret\"],\n \"content\": {}\n }\n\n if \"bookings_added\" in webhook:\n payload[\"content\"][\"bookings_added\"] = (\n webhook[\"bookings_added\"]\n )\n if \"bookings_removed\" in webhook:\n payload[\"content\"][\"bookings_removed\"] = (\n webhook[\"bookings_removed\"]\n )\n\n webhooks_to_enact[idx][\"payload\"] = payload\n\n if payload[\"content\"] != {}:\n unsent_requests.append(\n grequests.post(\n webhook[\"url\"], json=payload, headers={\n \"User-Agent\": \"uclapi-bot/1\"\n }\n )\n )\n self.stdout.write(\n \"Triggering {} webhooks.\".format(len(unsent_requests))\n )\n grequests.map(unsent_requests)\n\n for webhook in webhooks_to_enact:\n if 
webhook[\"payload\"][\"content\"] != {}:\n webhook_in_db = webhook[\"webhook_in_db\"]\n webhook_in_db.last_fired = timezone.now()\n webhook_in_db.save()\n\n new_webhook_history_entry = WebhookTriggerHistory(\n webhook=webhook_in_db,\n payload=webhook[\"payload\"]\n )\n new_webhook_history_entry.save()\n\n self.stdout.write(\"Webhooks triggered.\")\n", "path": "backend/uclapi/roombookings/management/commands/trigger_webhooks.py"}], "after_files": [{"content": "from django.core.management.base import BaseCommand\nfrom roombookings.models import Lock, BookingA, BookingB\nfrom roombookings.helpers import _serialize_bookings\nfrom dashboard.models import Webhook, WebhookTriggerHistory\nfrom datetime import datetime\nfrom deepdiff import DeepDiff\nfrom django.utils import timezone\nfrom requests_futures.sessions import FuturesSession\n\n\nclass Command(BaseCommand):\n\n help = 'Diff roombooking result sets and notify relevant webhooks'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--debug',\n action='store_true',\n dest='debug',\n help='Print webhook responses',\n )\n\n def handle(self, *args, **options):\n self.stdout.write(\"Triggering webhooks\")\n session = FuturesSession()\n\n # currently locked table is the old one, more recent one is not locked\n lock = Lock.objects.all()[0] # there is only ever one lock\n\n if lock.bookingA:\n old_booking_table = BookingA\n new_booking_table = BookingB\n else:\n old_booking_table = BookingB\n new_booking_table = BookingA\n\n now = datetime.now()\n\n old_bookings = _serialize_bookings(\n old_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n new_bookings = _serialize_bookings(\n new_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n\n ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)\n\n webhooks = Webhook.objects.all()\n # assumption: list of webhooks will be longer than ddiff\n\n num_bookings_added = 0\n num_bookings_removed = 0\n if \"iterable_item_added\" in ddiff:\n num_bookings_added = len(\n ddiff[\"iterable_item_added\"].values()\n )\n\n if \"iterable_item_removed\" in ddiff:\n num_bookings_removed = len(\n ddiff[\"iterable_item_removed\"].values()\n )\n\n self.stdout.write(\n \"{} bookings added\\n{} bookings removed.\".format(\n num_bookings_added,\n num_bookings_removed\n )\n )\n\n def webhook_map(webhook):\n def webhook_filter(booking):\n return (\n (\n webhook.siteid == '' or\n booking[\"siteid\"] == webhook.siteid\n ) and\n (\n webhook.roomid == '' or\n booking[\"roomid\"] == webhook.roomid\n ) and\n (\n webhook.contact == '' or\n # mimick SQL 'like'\n webhook.contact in str(booking[\"contact\"])\n )\n )\n output = {\n \"webhook_in_db\": webhook,\n \"url\": webhook.url,\n \"verification_secret\": webhook.verification_secret\n }\n if \"iterable_item_added\" in ddiff:\n bookings_added = list(filter(\n webhook_filter, ddiff[\"iterable_item_added\"].values()\n ))\n if bookings_added != []:\n output[\"bookings_added\"] = bookings_added\n if \"iterable_item_removed\" in ddiff:\n bookings_removed = list(filter(\n webhook_filter, ddiff[\"iterable_item_removed\"].values()\n ))\n if bookings_removed != []:\n output[\"bookings_removed\"] = bookings_removed\n\n return output\n\n webhooks_to_enact = list(map(webhook_map, webhooks))\n\n unsent_requests = []\n for idx, webhook in enumerate(webhooks_to_enact):\n payload = {\n \"service\": \"roombookings\",\n \"name\": \"bookings_changed\",\n \"verification_secret\": webhook[\"verification_secret\"],\n \"content\": {}\n }\n\n if \"bookings_added\" in 
webhook:\n payload[\"content\"][\"bookings_added\"] = (\n webhook[\"bookings_added\"]\n )\n if \"bookings_removed\" in webhook:\n payload[\"content\"][\"bookings_removed\"] = (\n webhook[\"bookings_removed\"]\n )\n\n webhooks_to_enact[idx][\"payload\"] = payload\n\n if payload[\"content\"] != {} and webhook[\"url\"] != \"\":\n unsent_requests.append(\n session.post(\n webhook[\"url\"], json=payload,\n headers={\n \"User-Agent\": \"uclapi-bot/1\"\n }\n )\n )\n self.stdout.write(\n \"Triggering {} webhooks.\".format(len(unsent_requests))\n )\n if(\"debug\" in options):\n for i in unsent_requests:\n self.stdout.write(\n 'response status {0}'.format(i.result().status_code)\n )\n\n for webhook in webhooks_to_enact:\n if webhook[\"payload\"][\"content\"] != {}:\n webhook_in_db = webhook[\"webhook_in_db\"]\n webhook_in_db.last_fired = timezone.now()\n webhook_in_db.save()\n\n new_webhook_history_entry = WebhookTriggerHistory(\n webhook=webhook_in_db,\n payload=webhook[\"payload\"]\n )\n new_webhook_history_entry.save()\n\n self.stdout.write(\"Webhooks triggered.\")\n", "path": "backend/uclapi/roombookings/management/commands/trigger_webhooks.py"}]} | 1,648 | 495 |
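Editorial aside, not part of the dataset row above or below: the golden diff for this record replaces `grequests` with `requests-futures` and skips webhooks whose URL is empty. A minimal, self-contained sketch of that pattern follows; the URLs and payloads are hypothetical placeholders, and the `requests-futures` package is assumed to be installed.

```python
from requests_futures.sessions import FuturesSession

session = FuturesSession()

# Hypothetical webhook targets; empty URLs and empty payloads are skipped,
# mirroring the check added in the patch.
webhooks = [
    {"url": "https://example.com/hook-a", "payload": {"content": {"bookings_added": []}}},
    {"url": "", "payload": {"content": {}}},
]

futures = [
    session.post(hook["url"], json=hook["payload"], headers={"User-Agent": "uclapi-bot/1"})
    for hook in webhooks
    if hook["payload"]["content"] != {} and hook["url"] != ""
]

# .result() blocks until the request completes and returns a requests.Response,
# whose status code is what the patched --debug flag prints.
for future in futures:
    print("response status", future.result().status_code)
```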
gh_patches_debug_27920 | rasdani/github-patches | git_diff | litestar-org__litestar-1581 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: `FileResponse` doesn't set `content-encoding` headers on gzip/brotli compressed files
### Description
When using `StaticFilesConfig` to serve compressed files (think `styles.css.gz`), Litestar will happily serve the file and even properly infer the mimetype, but won't set the correct `content-encoding` header required by the browser.
I will provide a PR with a test.
### URL to code causing the issue
_No response_
### MCVE
_No response_
### Steps to reproduce
_No response_
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
1.51.10
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlite/response/file.py`
Content:
```
1 from email.utils import formatdate
2 from inspect import iscoroutine
3 from mimetypes import guess_type
4 from typing import (
5 TYPE_CHECKING,
6 Any,
7 AsyncGenerator,
8 Coroutine,
9 Dict,
10 Literal,
11 Optional,
12 Union,
13 cast,
14 )
15 from urllib.parse import quote
16 from zlib import adler32
17
18 from starlite.constants import ONE_MEGABYTE
19 from starlite.enums import MediaType
20 from starlite.exceptions import ImproperlyConfiguredException
21 from starlite.response.streaming import StreamingResponse
22 from starlite.status_codes import HTTP_200_OK
23 from starlite.utils.file import BaseLocalFileSystem, FileSystemAdapter
24
25 if TYPE_CHECKING:
26 from os import PathLike
27 from os import stat_result as stat_result_type
28
29 from anyio import Path
30
31 from starlite.datastructures import BackgroundTask, BackgroundTasks, ETag
32 from starlite.types import (
33 HTTPResponseBodyEvent,
34 PathType,
35 Receive,
36 ResponseCookies,
37 Send,
38 )
39 from starlite.types.file_types import FileInfo, FileSystemProtocol
40
41
42 async def async_file_iterator(
43 file_path: "PathType", chunk_size: int, adapter: "FileSystemAdapter"
44 ) -> AsyncGenerator[bytes, None]:
45 """Return an async that asynchronously reads a file and yields its chunks.
46
47 Args:
48 file_path: A path to a file.
49 chunk_size: The chunk file to use.
50 adapter: File system adapter class.
51 adapter: File system adapter class.
52
53 Returns:
54 An async generator.
55 """
56 async with await adapter.open(file_path) as file:
57 while chunk := await file.read(chunk_size):
58 yield chunk
59
60
61 def create_etag_for_file(path: "PathType", modified_time: float, file_size: int) -> str:
62 """Create an etag.
63
64 Notes:
65 - Function is derived from flask.
66
67 Returns:
68 An etag.
69 """
70 check = adler32(str(path).encode("utf-8")) & 0xFFFFFFFF
71 return f'"{modified_time}-{file_size}-{check}"'
72
73
74 class FileResponse(StreamingResponse):
75 """A response, streaming a file as response body."""
76
77 __slots__ = (
78 "chunk_size",
79 "content_disposition_type",
80 "etag",
81 "file_path",
82 "filename",
83 "adapter",
84 "file_info",
85 )
86
87 def __init__(
88 self,
89 path: Union[str, "PathLike", "Path"],
90 *,
91 background: Optional[Union["BackgroundTask", "BackgroundTasks"]] = None,
92 chunk_size: int = ONE_MEGABYTE,
93 content_disposition_type: Literal["attachment", "inline"] = "attachment",
94 cookies: Optional["ResponseCookies"] = None,
95 encoding: str = "utf-8",
96 etag: Optional["ETag"] = None,
97 file_system: Optional["FileSystemProtocol"] = None,
98 filename: Optional[str] = None,
99 file_info: Optional["FileInfo"] = None,
100 headers: Optional[Dict[str, Any]] = None,
101 is_head_response: bool = False,
102 media_type: Optional[Union[Literal[MediaType.TEXT], str]] = None,
103 stat_result: Optional["stat_result_type"] = None,
104 status_code: int = HTTP_200_OK,
105 ) -> None:
106 """Initialize ``FileResponse``
107
108 Notes:
109 - This class extends the :class:`StreamingResponse <starlite.response.StreamingResponse>` class.
110
111 Args:
112 path: A file path in one of the supported formats.
113 status_code: An HTTP status code.
114 media_type: A value for the response 'Content-Type' header. If not provided, the value will be either
115 derived from the filename if provided and supported by the stdlib, or will default to
116 'application/octet-stream'.
117 background: A :class:`BackgroundTask <starlite.datastructures.BackgroundTask>` instance or
118 :class:`BackgroundTasks <starlite.datastructures.BackgroundTasks>` to execute after the response is finished.
119 Defaults to None.
120 headers: A string keyed dictionary of response headers. Header keys are insensitive.
121 cookies: A list of :class:`Cookie <starlite.datastructures.Cookie>` instances to be set under the response 'Set-Cookie' header.
122 encoding: The encoding to be used for the response headers.
123 is_head_response: Whether the response should send only the headers ("head" request) or also the content.
124 filename: An optional filename to set in the header.
125 stat_result: An optional result of calling 'os.stat'. If not provided, this will be done by the response
126 constructor.
127 chunk_size: The chunk sizes to use when streaming the file. Defaults to 1MB.
128 content_disposition_type: The type of the 'Content-Disposition'. Either ``inline`` or ``attachment``.
129 etag: An optional :class:`ETag <starlite.datastructures.ETag>` instance.
130 If not provided, an etag will be automatically generated.
131 file_system: An implementation of the :class:`FileSystemProtocol <starlite.types.FileSystemProtocol>`. If provided
132 it will be used to load the file.
133 file_info: The output of calling ``file_system.info(..)``, equivalent to providing a ``stat_result``.
134 """
135 if not media_type:
136 mimetype, _ = guess_type(filename) if filename else (None, None)
137 media_type = mimetype or "application/octet-stream"
138
139 self.chunk_size = chunk_size
140 self.content_disposition_type = content_disposition_type
141 self.etag = etag
142 self.file_path = path
143 self.filename = filename or ""
144 self.adapter = FileSystemAdapter(file_system or BaseLocalFileSystem())
145
146 super().__init__(
147 content=async_file_iterator(file_path=path, chunk_size=chunk_size, adapter=self.adapter),
148 status_code=status_code,
149 media_type=media_type,
150 background=background,
151 headers=headers,
152 cookies=cookies,
153 encoding=encoding,
154 is_head_response=is_head_response,
155 )
156
157 if file_info:
158 self.file_info: Union["FileInfo", "Coroutine[Any, Any, 'FileInfo']"] = file_info
159 elif stat_result:
160 self.file_info = self.adapter.parse_stat_result(result=stat_result, path=path)
161 else:
162 self.file_info = self.adapter.info(self.file_path)
163
164 @property
165 def content_disposition(self) -> str:
166 """Content disposition.
167
168 Returns:
169 A value for the 'Content-Disposition' header.
170 """
171 quoted_filename = quote(self.filename)
172 is_utf8 = quoted_filename == self.filename
173 if is_utf8:
174 return f'{self.content_disposition_type}; filename="{self.filename}"'
175 return f"{self.content_disposition_type}; filename*=utf-8''{quoted_filename}"
176
177 @property
178 def content_length(self) -> int:
179 """Content length of the response if applicable.
180
181 Returns:
182 Returns the value of 'self.stat_result.st_size' to populate the 'Content-Length' header.
183 """
184 if isinstance(self.file_info, dict):
185 return self.file_info["size"]
186 return 0
187
188 async def send_body(self, send: "Send", receive: "Receive") -> None:
189 """Emit a stream of events correlating with the response body.
190
191 Args:
192 send: The ASGI send function.
193 receive: The ASGI receive function.
194
195 Returns:
196 None
197 """
198 if self.chunk_size < self.content_length:
199 await super().send_body(send=send, receive=receive)
200 return
201
202 async with await self.adapter.open(self.file_path) as file:
203 body_event: "HTTPResponseBodyEvent" = {
204 "type": "http.response.body",
205 "body": await file.read(),
206 "more_body": False,
207 }
208 await send(body_event)
209
210 async def start_response(self, send: "Send") -> None:
211 """Emit the start event of the response. This event includes the headers and status codes.
212
213 Args:
214 send: The ASGI send function.
215
216 Returns:
217 None
218 """
219 try:
220 fs_info = self.file_info = cast(
221 "FileInfo", (await self.file_info if iscoroutine(self.file_info) else self.file_info)
222 )
223 except FileNotFoundError as e:
224 raise ImproperlyConfiguredException(f"{self.file_path} does not exist") from e
225
226 if fs_info["type"] != "file":
227 raise ImproperlyConfiguredException(f"{self.file_path} is not a file")
228
229 self.set_header("last-modified", formatdate(fs_info["mtime"], usegmt=True))
230 self.set_header("content-disposition", self.content_disposition)
231 self.set_etag(
232 self.etag
233 or create_etag_for_file(path=self.file_path, modified_time=fs_info["mtime"], file_size=fs_info["size"])
234 )
235
236 await super().start_response(send=send)
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlite/response/file.py b/starlite/response/file.py
--- a/starlite/response/file.py
+++ b/starlite/response/file.py
@@ -1,6 +1,6 @@
from email.utils import formatdate
from inspect import iscoroutine
-from mimetypes import guess_type
+from mimetypes import encodings_map, guess_type
from typing import (
TYPE_CHECKING,
Any,
@@ -38,6 +38,9 @@
)
from starlite.types.file_types import FileInfo, FileSystemProtocol
+# brotli not supported in 'mimetypes.encodings_map' until py 3.9.
+encodings_map[".br"] = "br"
+
async def async_file_iterator(
file_path: "PathType", chunk_size: int, adapter: "FileSystemAdapter"
@@ -133,8 +136,11 @@
file_info: The output of calling ``file_system.info(..)``, equivalent to providing a ``stat_result``.
"""
if not media_type:
- mimetype, _ = guess_type(filename) if filename else (None, None)
+ mimetype, content_encoding = guess_type(filename) if filename else (None, None)
media_type = mimetype or "application/octet-stream"
+ if content_encoding is not None:
+ headers = headers or {}
+ headers.update({"content-encoding": content_encoding})
self.chunk_size = chunk_size
self.content_disposition_type = content_disposition_type
| {"golden_diff": "diff --git a/starlite/response/file.py b/starlite/response/file.py\n--- a/starlite/response/file.py\n+++ b/starlite/response/file.py\n@@ -1,6 +1,6 @@\n from email.utils import formatdate\n from inspect import iscoroutine\n-from mimetypes import guess_type\n+from mimetypes import encodings_map, guess_type\n from typing import (\n TYPE_CHECKING,\n Any,\n@@ -38,6 +38,9 @@\n )\n from starlite.types.file_types import FileInfo, FileSystemProtocol\n \n+# brotli not supported in 'mimetypes.encodings_map' until py 3.9.\n+encodings_map[\".br\"] = \"br\"\n+\n \n async def async_file_iterator(\n file_path: \"PathType\", chunk_size: int, adapter: \"FileSystemAdapter\"\n@@ -133,8 +136,11 @@\n file_info: The output of calling ``file_system.info(..)``, equivalent to providing a ``stat_result``.\n \"\"\"\n if not media_type:\n- mimetype, _ = guess_type(filename) if filename else (None, None)\n+ mimetype, content_encoding = guess_type(filename) if filename else (None, None)\n media_type = mimetype or \"application/octet-stream\"\n+ if content_encoding is not None:\n+ headers = headers or {}\n+ headers.update({\"content-encoding\": content_encoding})\n \n self.chunk_size = chunk_size\n self.content_disposition_type = content_disposition_type\n", "issue": "Bug: `FileResponse` doesn't set `content-encoding` headers on gzip/brotli compressed files\n### Description\n\nWhen using `StaticFilesConfig` to serve compressed files (think `styles.css.gz`), Litestar will happily serve the file and even properly infer the mimetype, but won't set the correct `content-encoding` header required by the browser.\r\n\r\nI will provide a PR with a test.\n\n### URL to code causing the issue\n\n_No response_\n\n### MCVE\n\n_No response_\n\n### Steps to reproduce\n\n_No response_\n\n### Screenshots\n\n```bash\n\"\"\n```\n\n\n### Logs\n\n_No response_\n\n### Litestar Version\n\n1.51.10\n\n### Platform\n\n- [X] Linux\n- [ ] Mac\n- [ ] Windows\n- [ ] Other (Please specify in the description above)\n", "before_files": [{"content": "from email.utils import formatdate\nfrom inspect import iscoroutine\nfrom mimetypes import guess_type\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncGenerator,\n Coroutine,\n Dict,\n Literal,\n Optional,\n Union,\n cast,\n)\nfrom urllib.parse import quote\nfrom zlib import adler32\n\nfrom starlite.constants import ONE_MEGABYTE\nfrom starlite.enums import MediaType\nfrom starlite.exceptions import ImproperlyConfiguredException\nfrom starlite.response.streaming import StreamingResponse\nfrom starlite.status_codes import HTTP_200_OK\nfrom starlite.utils.file import BaseLocalFileSystem, FileSystemAdapter\n\nif TYPE_CHECKING:\n from os import PathLike\n from os import stat_result as stat_result_type\n\n from anyio import Path\n\n from starlite.datastructures import BackgroundTask, BackgroundTasks, ETag\n from starlite.types import (\n HTTPResponseBodyEvent,\n PathType,\n Receive,\n ResponseCookies,\n Send,\n )\n from starlite.types.file_types import FileInfo, FileSystemProtocol\n\n\nasync def async_file_iterator(\n file_path: \"PathType\", chunk_size: int, adapter: \"FileSystemAdapter\"\n) -> AsyncGenerator[bytes, None]:\n \"\"\"Return an async that asynchronously reads a file and yields its chunks.\n\n Args:\n file_path: A path to a file.\n chunk_size: The chunk file to use.\n adapter: File system adapter class.\n adapter: File system adapter class.\n\n Returns:\n An async generator.\n \"\"\"\n async with await adapter.open(file_path) as file:\n while chunk := await 
file.read(chunk_size):\n yield chunk\n\n\ndef create_etag_for_file(path: \"PathType\", modified_time: float, file_size: int) -> str:\n \"\"\"Create an etag.\n\n Notes:\n - Function is derived from flask.\n\n Returns:\n An etag.\n \"\"\"\n check = adler32(str(path).encode(\"utf-8\")) & 0xFFFFFFFF\n return f'\"{modified_time}-{file_size}-{check}\"'\n\n\nclass FileResponse(StreamingResponse):\n \"\"\"A response, streaming a file as response body.\"\"\"\n\n __slots__ = (\n \"chunk_size\",\n \"content_disposition_type\",\n \"etag\",\n \"file_path\",\n \"filename\",\n \"adapter\",\n \"file_info\",\n )\n\n def __init__(\n self,\n path: Union[str, \"PathLike\", \"Path\"],\n *,\n background: Optional[Union[\"BackgroundTask\", \"BackgroundTasks\"]] = None,\n chunk_size: int = ONE_MEGABYTE,\n content_disposition_type: Literal[\"attachment\", \"inline\"] = \"attachment\",\n cookies: Optional[\"ResponseCookies\"] = None,\n encoding: str = \"utf-8\",\n etag: Optional[\"ETag\"] = None,\n file_system: Optional[\"FileSystemProtocol\"] = None,\n filename: Optional[str] = None,\n file_info: Optional[\"FileInfo\"] = None,\n headers: Optional[Dict[str, Any]] = None,\n is_head_response: bool = False,\n media_type: Optional[Union[Literal[MediaType.TEXT], str]] = None,\n stat_result: Optional[\"stat_result_type\"] = None,\n status_code: int = HTTP_200_OK,\n ) -> None:\n \"\"\"Initialize ``FileResponse``\n\n Notes:\n - This class extends the :class:`StreamingResponse <starlite.response.StreamingResponse>` class.\n\n Args:\n path: A file path in one of the supported formats.\n status_code: An HTTP status code.\n media_type: A value for the response 'Content-Type' header. If not provided, the value will be either\n derived from the filename if provided and supported by the stdlib, or will default to\n 'application/octet-stream'.\n background: A :class:`BackgroundTask <starlite.datastructures.BackgroundTask>` instance or\n :class:`BackgroundTasks <starlite.datastructures.BackgroundTasks>` to execute after the response is finished.\n Defaults to None.\n headers: A string keyed dictionary of response headers. Header keys are insensitive.\n cookies: A list of :class:`Cookie <starlite.datastructures.Cookie>` instances to be set under the response 'Set-Cookie' header.\n encoding: The encoding to be used for the response headers.\n is_head_response: Whether the response should send only the headers (\"head\" request) or also the content.\n filename: An optional filename to set in the header.\n stat_result: An optional result of calling 'os.stat'. If not provided, this will be done by the response\n constructor.\n chunk_size: The chunk sizes to use when streaming the file. Defaults to 1MB.\n content_disposition_type: The type of the 'Content-Disposition'. Either ``inline`` or ``attachment``.\n etag: An optional :class:`ETag <starlite.datastructures.ETag>` instance.\n If not provided, an etag will be automatically generated.\n file_system: An implementation of the :class:`FileSystemProtocol <starlite.types.FileSystemProtocol>`. 
If provided\n it will be used to load the file.\n file_info: The output of calling ``file_system.info(..)``, equivalent to providing a ``stat_result``.\n \"\"\"\n if not media_type:\n mimetype, _ = guess_type(filename) if filename else (None, None)\n media_type = mimetype or \"application/octet-stream\"\n\n self.chunk_size = chunk_size\n self.content_disposition_type = content_disposition_type\n self.etag = etag\n self.file_path = path\n self.filename = filename or \"\"\n self.adapter = FileSystemAdapter(file_system or BaseLocalFileSystem())\n\n super().__init__(\n content=async_file_iterator(file_path=path, chunk_size=chunk_size, adapter=self.adapter),\n status_code=status_code,\n media_type=media_type,\n background=background,\n headers=headers,\n cookies=cookies,\n encoding=encoding,\n is_head_response=is_head_response,\n )\n\n if file_info:\n self.file_info: Union[\"FileInfo\", \"Coroutine[Any, Any, 'FileInfo']\"] = file_info\n elif stat_result:\n self.file_info = self.adapter.parse_stat_result(result=stat_result, path=path)\n else:\n self.file_info = self.adapter.info(self.file_path)\n\n @property\n def content_disposition(self) -> str:\n \"\"\"Content disposition.\n\n Returns:\n A value for the 'Content-Disposition' header.\n \"\"\"\n quoted_filename = quote(self.filename)\n is_utf8 = quoted_filename == self.filename\n if is_utf8:\n return f'{self.content_disposition_type}; filename=\"{self.filename}\"'\n return f\"{self.content_disposition_type}; filename*=utf-8''{quoted_filename}\"\n\n @property\n def content_length(self) -> int:\n \"\"\"Content length of the response if applicable.\n\n Returns:\n Returns the value of 'self.stat_result.st_size' to populate the 'Content-Length' header.\n \"\"\"\n if isinstance(self.file_info, dict):\n return self.file_info[\"size\"]\n return 0\n\n async def send_body(self, send: \"Send\", receive: \"Receive\") -> None:\n \"\"\"Emit a stream of events correlating with the response body.\n\n Args:\n send: The ASGI send function.\n receive: The ASGI receive function.\n\n Returns:\n None\n \"\"\"\n if self.chunk_size < self.content_length:\n await super().send_body(send=send, receive=receive)\n return\n\n async with await self.adapter.open(self.file_path) as file:\n body_event: \"HTTPResponseBodyEvent\" = {\n \"type\": \"http.response.body\",\n \"body\": await file.read(),\n \"more_body\": False,\n }\n await send(body_event)\n\n async def start_response(self, send: \"Send\") -> None:\n \"\"\"Emit the start event of the response. 
This event includes the headers and status codes.\n\n Args:\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n try:\n fs_info = self.file_info = cast(\n \"FileInfo\", (await self.file_info if iscoroutine(self.file_info) else self.file_info)\n )\n except FileNotFoundError as e:\n raise ImproperlyConfiguredException(f\"{self.file_path} does not exist\") from e\n\n if fs_info[\"type\"] != \"file\":\n raise ImproperlyConfiguredException(f\"{self.file_path} is not a file\")\n\n self.set_header(\"last-modified\", formatdate(fs_info[\"mtime\"], usegmt=True))\n self.set_header(\"content-disposition\", self.content_disposition)\n self.set_etag(\n self.etag\n or create_etag_for_file(path=self.file_path, modified_time=fs_info[\"mtime\"], file_size=fs_info[\"size\"])\n )\n\n await super().start_response(send=send)\n", "path": "starlite/response/file.py"}], "after_files": [{"content": "from email.utils import formatdate\nfrom inspect import iscoroutine\nfrom mimetypes import encodings_map, guess_type\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncGenerator,\n Coroutine,\n Dict,\n Literal,\n Optional,\n Union,\n cast,\n)\nfrom urllib.parse import quote\nfrom zlib import adler32\n\nfrom starlite.constants import ONE_MEGABYTE\nfrom starlite.enums import MediaType\nfrom starlite.exceptions import ImproperlyConfiguredException\nfrom starlite.response.streaming import StreamingResponse\nfrom starlite.status_codes import HTTP_200_OK\nfrom starlite.utils.file import BaseLocalFileSystem, FileSystemAdapter\n\nif TYPE_CHECKING:\n from os import PathLike\n from os import stat_result as stat_result_type\n\n from anyio import Path\n\n from starlite.datastructures import BackgroundTask, BackgroundTasks, ETag\n from starlite.types import (\n HTTPResponseBodyEvent,\n PathType,\n Receive,\n ResponseCookies,\n Send,\n )\n from starlite.types.file_types import FileInfo, FileSystemProtocol\n\n# brotli not supported in 'mimetypes.encodings_map' until py 3.9.\nencodings_map[\".br\"] = \"br\"\n\n\nasync def async_file_iterator(\n file_path: \"PathType\", chunk_size: int, adapter: \"FileSystemAdapter\"\n) -> AsyncGenerator[bytes, None]:\n \"\"\"Return an async that asynchronously reads a file and yields its chunks.\n\n Args:\n file_path: A path to a file.\n chunk_size: The chunk file to use.\n adapter: File system adapter class.\n adapter: File system adapter class.\n\n Returns:\n An async generator.\n \"\"\"\n async with await adapter.open(file_path) as file:\n while chunk := await file.read(chunk_size):\n yield chunk\n\n\ndef create_etag_for_file(path: \"PathType\", modified_time: float, file_size: int) -> str:\n \"\"\"Create an etag.\n\n Notes:\n - Function is derived from flask.\n\n Returns:\n An etag.\n \"\"\"\n check = adler32(str(path).encode(\"utf-8\")) & 0xFFFFFFFF\n return f'\"{modified_time}-{file_size}-{check}\"'\n\n\nclass FileResponse(StreamingResponse):\n \"\"\"A response, streaming a file as response body.\"\"\"\n\n __slots__ = (\n \"chunk_size\",\n \"content_disposition_type\",\n \"etag\",\n \"file_path\",\n \"filename\",\n \"adapter\",\n \"file_info\",\n )\n\n def __init__(\n self,\n path: Union[str, \"PathLike\", \"Path\"],\n *,\n background: Optional[Union[\"BackgroundTask\", \"BackgroundTasks\"]] = None,\n chunk_size: int = ONE_MEGABYTE,\n content_disposition_type: Literal[\"attachment\", \"inline\"] = \"attachment\",\n cookies: Optional[\"ResponseCookies\"] = None,\n encoding: str = \"utf-8\",\n etag: Optional[\"ETag\"] = None,\n file_system: Optional[\"FileSystemProtocol\"] = None,\n 
filename: Optional[str] = None,\n file_info: Optional[\"FileInfo\"] = None,\n headers: Optional[Dict[str, Any]] = None,\n is_head_response: bool = False,\n media_type: Optional[Union[Literal[MediaType.TEXT], str]] = None,\n stat_result: Optional[\"stat_result_type\"] = None,\n status_code: int = HTTP_200_OK,\n ) -> None:\n \"\"\"Initialize ``FileResponse``\n\n Notes:\n - This class extends the :class:`StreamingResponse <starlite.response.StreamingResponse>` class.\n\n Args:\n path: A file path in one of the supported formats.\n status_code: An HTTP status code.\n media_type: A value for the response 'Content-Type' header. If not provided, the value will be either\n derived from the filename if provided and supported by the stdlib, or will default to\n 'application/octet-stream'.\n background: A :class:`BackgroundTask <starlite.datastructures.BackgroundTask>` instance or\n :class:`BackgroundTasks <starlite.datastructures.BackgroundTasks>` to execute after the response is finished.\n Defaults to None.\n headers: A string keyed dictionary of response headers. Header keys are insensitive.\n cookies: A list of :class:`Cookie <starlite.datastructures.Cookie>` instances to be set under the response 'Set-Cookie' header.\n encoding: The encoding to be used for the response headers.\n is_head_response: Whether the response should send only the headers (\"head\" request) or also the content.\n filename: An optional filename to set in the header.\n stat_result: An optional result of calling 'os.stat'. If not provided, this will be done by the response\n constructor.\n chunk_size: The chunk sizes to use when streaming the file. Defaults to 1MB.\n content_disposition_type: The type of the 'Content-Disposition'. Either ``inline`` or ``attachment``.\n etag: An optional :class:`ETag <starlite.datastructures.ETag>` instance.\n If not provided, an etag will be automatically generated.\n file_system: An implementation of the :class:`FileSystemProtocol <starlite.types.FileSystemProtocol>`. 
If provided\n it will be used to load the file.\n file_info: The output of calling ``file_system.info(..)``, equivalent to providing a ``stat_result``.\n \"\"\"\n if not media_type:\n mimetype, content_encoding = guess_type(filename) if filename else (None, None)\n media_type = mimetype or \"application/octet-stream\"\n if content_encoding is not None:\n headers = headers or {}\n headers.update({\"content-encoding\": content_encoding})\n\n self.chunk_size = chunk_size\n self.content_disposition_type = content_disposition_type\n self.etag = etag\n self.file_path = path\n self.filename = filename or \"\"\n self.adapter = FileSystemAdapter(file_system or BaseLocalFileSystem())\n\n super().__init__(\n content=async_file_iterator(file_path=path, chunk_size=chunk_size, adapter=self.adapter),\n status_code=status_code,\n media_type=media_type,\n background=background,\n headers=headers,\n cookies=cookies,\n encoding=encoding,\n is_head_response=is_head_response,\n )\n\n if file_info:\n self.file_info: Union[\"FileInfo\", \"Coroutine[Any, Any, 'FileInfo']\"] = file_info\n elif stat_result:\n self.file_info = self.adapter.parse_stat_result(result=stat_result, path=path)\n else:\n self.file_info = self.adapter.info(self.file_path)\n\n @property\n def content_disposition(self) -> str:\n \"\"\"Content disposition.\n\n Returns:\n A value for the 'Content-Disposition' header.\n \"\"\"\n quoted_filename = quote(self.filename)\n is_utf8 = quoted_filename == self.filename\n if is_utf8:\n return f'{self.content_disposition_type}; filename=\"{self.filename}\"'\n return f\"{self.content_disposition_type}; filename*=utf-8''{quoted_filename}\"\n\n @property\n def content_length(self) -> int:\n \"\"\"Content length of the response if applicable.\n\n Returns:\n Returns the value of 'self.stat_result.st_size' to populate the 'Content-Length' header.\n \"\"\"\n if isinstance(self.file_info, dict):\n return self.file_info[\"size\"]\n return 0\n\n async def send_body(self, send: \"Send\", receive: \"Receive\") -> None:\n \"\"\"Emit a stream of events correlating with the response body.\n\n Args:\n send: The ASGI send function.\n receive: The ASGI receive function.\n\n Returns:\n None\n \"\"\"\n if self.chunk_size < self.content_length:\n await super().send_body(send=send, receive=receive)\n return\n\n async with await self.adapter.open(self.file_path) as file:\n body_event: \"HTTPResponseBodyEvent\" = {\n \"type\": \"http.response.body\",\n \"body\": await file.read(),\n \"more_body\": False,\n }\n await send(body_event)\n\n async def start_response(self, send: \"Send\") -> None:\n \"\"\"Emit the start event of the response. This event includes the headers and status codes.\n\n Args:\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n try:\n fs_info = self.file_info = cast(\n \"FileInfo\", (await self.file_info if iscoroutine(self.file_info) else self.file_info)\n )\n except FileNotFoundError as e:\n raise ImproperlyConfiguredException(f\"{self.file_path} does not exist\") from e\n\n if fs_info[\"type\"] != \"file\":\n raise ImproperlyConfiguredException(f\"{self.file_path} is not a file\")\n\n self.set_header(\"last-modified\", formatdate(fs_info[\"mtime\"], usegmt=True))\n self.set_header(\"content-disposition\", self.content_disposition)\n self.set_etag(\n self.etag\n or create_etag_for_file(path=self.file_path, modified_time=fs_info[\"mtime\"], file_size=fs_info[\"size\"])\n )\n\n await super().start_response(send=send)\n", "path": "starlite/response/file.py"}]} | 2,963 | 318 |
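Editorial aside, not part of the dataset row above or below: the fix hinges on the second element returned by `mimetypes.guess_type`, which reports the compression encoding for suffixes such as `.gz`, plus a manual registration of `.br` for interpreters older than Python 3.9. A small standalone sketch of that behaviour (the file names are hypothetical):

```python
from mimetypes import encodings_map, guess_type

# Brotli is only present in the stdlib encodings_map from Python 3.9 onwards,
# so it is registered by hand here, as the patch does; setdefault keeps this
# harmless on newer interpreters.
encodings_map.setdefault(".br", "br")

for name in ("styles.css", "styles.css.gz", "styles.css.br"):
    media_type, content_encoding = guess_type(name)
    headers = {"content-type": media_type or "application/octet-stream"}
    if content_encoding is not None:
        headers["content-encoding"] = content_encoding
    print(name, "->", headers)
```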
gh_patches_debug_51896 | rasdani/github-patches | git_diff | geopandas__geopandas-1566 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: the clip function doesn't dynamically pick up the geometry column name
Hi, today I noticed that the clip function didn't work for me, but then I found what the problem was.
Import the GeoDataFrames with a geometry column called "WKT":
```
field_map = pd.read_csv('./field_map.csv')
field_map['WKT'] = field_map['WKT'].apply(wkt.loads)
field_map = gpd.GeoDataFrame(field_map, geometry = 'WKT', crs = {'init': 'epsg:4326'})
print(field_map.columns)
boundary_map = pd.read_csv('./boundary_map.csv')
boundary_map['WKT'] = boundary_map['WKT'].apply(wkt.loads)
boundary_map = gpd.GeoDataFrame(boundary_map, geometry = 'WKT', crs = {'init': 'epsg:4326'})
print(boundary_map.columns)
> Index(['Unnamed: 0', 'IDX', 'Value', 'WKT', 'WKTTypeID', 'IDXmaster'], dtype='object')
> Index(['Unnamed: 0', 'WKT'], dtype='object')
```
Clip the map and plot to validate:
```
clip_map = gpd.clip(field_map, boundary_map)
fig, ax = plt.subplots(figsize=(10,10))
clip_map.plot(ax=ax)
boundary_map.geometry.boundary.plot(ax=ax, color='red')
```

It seems that the clip has not worked, but if we look at the clip_map columns we see both "WKT" and "geometry".

**SOLUTION:**
This worked for me: renaming the geometry columns to "geometry"
```
field_map = field_map.rename_geometry('geometry')
boundary_map = boundary_map.rename_geometry('geometry')
clip_map = gpd.clip(field_map, boundary_map)
fig, ax = plt.subplots(figsize=(10,10))
clip_map.plot(ax=ax)
boundary_map.geometry.boundary.plot(ax=ax, color='red')
```

The clip function now works correctly.
Regards
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/tools/clip.py`
Content:
```
1 """
2 geopandas.clip
3 ==============
4
5 A module to clip vector data using GeoPandas.
6
7 """
8 import warnings
9
10 import numpy as np
11 import pandas as pd
12
13 from shapely.geometry import Polygon, MultiPolygon
14
15 from geopandas import GeoDataFrame, GeoSeries
16 from geopandas.array import _check_crs, _crs_mismatch_warn
17
18
19 def _clip_points(gdf, poly):
20 """Clip point geometry to the polygon extent.
21
22 Clip an input point GeoDataFrame to the polygon extent of the poly
23 parameter. Points that intersect the poly geometry are extracted with
24 associated attributes and returned.
25
26 Parameters
27 ----------
28 gdf : GeoDataFrame, GeoSeries
29 Composed of point geometry that will be clipped to the poly.
30
31 poly : (Multi)Polygon
32 Reference geometry used to spatially clip the data.
33
34 Returns
35 -------
36 GeoDataFrame
37 The returned GeoDataFrame is a subset of gdf that intersects
38 with poly.
39 """
40 return gdf.iloc[gdf.sindex.query(poly, predicate="intersects")]
41
42
43 def _clip_line_poly(gdf, poly):
44 """Clip line and polygon geometry to the polygon extent.
45
46 Clip an input line or polygon to the polygon extent of the poly
47 parameter. Parts of Lines or Polygons that intersect the poly geometry are
48 extracted with associated attributes and returned.
49
50 Parameters
51 ----------
52 gdf : GeoDataFrame, GeoSeries
53 Line or polygon geometry that is clipped to poly.
54
55 poly : (Multi)Polygon
56 Reference polygon for clipping.
57
58 Returns
59 -------
60 GeoDataFrame
61 The returned GeoDataFrame is a clipped subset of gdf
62 that intersects with poly.
63 """
64 gdf_sub = gdf.iloc[gdf.sindex.query(poly, predicate="intersects")]
65
66 # Clip the data with the polygon
67 if isinstance(gdf_sub, GeoDataFrame):
68 clipped = gdf_sub.copy()
69 clipped["geometry"] = gdf_sub.intersection(poly)
70 else:
71 # GeoSeries
72 clipped = gdf_sub.intersection(poly)
73
74 return clipped
75
76
77 def clip(gdf, mask, keep_geom_type=False):
78 """Clip points, lines, or polygon geometries to the mask extent.
79
80 Both layers must be in the same Coordinate Reference System (CRS).
81 The `gdf` will be clipped to the full extent of the clip object.
82
83 If there are multiple polygons in mask, data from `gdf` will be
84 clipped to the total boundary of all polygons in mask.
85
86 Parameters
87 ----------
88 gdf : GeoDataFrame or GeoSeries
89 Vector layer (point, line, polygon) to be clipped to mask.
90 mask : GeoDataFrame, GeoSeries, (Multi)Polygon
91 Polygon vector layer used to clip `gdf`.
92 The mask's geometry is dissolved into one geometric feature
93 and intersected with `gdf`.
94 keep_geom_type : boolean, default False
95 If True, return only geometries of original type in case of intersection
96 resulting in multiple geometry types or GeometryCollections.
97 If False, return all resulting geometries (potentially mixed-types).
98
99 Returns
100 -------
101 GeoDataFrame or GeoSeries
102 Vector data (points, lines, polygons) from `gdf` clipped to
103 polygon boundary from mask.
104
105 Examples
106 --------
107 Clip points (global cities) with a polygon (the South American continent):
108
109 >>> import geopandas
110 >>> path =
111 >>> world = geopandas.read_file(
112 ... geopandas.datasets.get_path('naturalearth_lowres'))
113 >>> south_america = world[world['continent'] == "South America"]
114 >>> capitals = geopandas.read_file(
115 ... geopandas.datasets.get_path('naturalearth_cities'))
116 >>> capitals.shape
117 (202, 2)
118 >>> sa_capitals = geopandas.clip(capitals, south_america)
119 >>> sa_capitals.shape
120 (12, 2)
121 """
122 if not isinstance(gdf, (GeoDataFrame, GeoSeries)):
123 raise TypeError(
124 "'gdf' should be GeoDataFrame or GeoSeries, got {}".format(type(gdf))
125 )
126
127 if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon)):
128 raise TypeError(
129 "'mask' should be GeoDataFrame, GeoSeries or"
130 "(Multi)Polygon, got {}".format(type(gdf))
131 )
132
133 if isinstance(mask, (GeoDataFrame, GeoSeries)):
134 if not _check_crs(gdf, mask):
135 _crs_mismatch_warn(gdf, mask, stacklevel=3)
136
137 if isinstance(mask, (GeoDataFrame, GeoSeries)):
138 box_mask = mask.total_bounds
139 else:
140 box_mask = mask.bounds
141 box_gdf = gdf.total_bounds
142 if not (
143 ((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))
144 and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
145 ):
146 return gdf.iloc[:0]
147
148 if isinstance(mask, (GeoDataFrame, GeoSeries)):
149 poly = mask.geometry.unary_union
150 else:
151 poly = mask
152
153 geom_types = gdf.geometry.type
154 poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon"))
155 line_idx = np.asarray(
156 (geom_types == "LineString")
157 | (geom_types == "LinearRing")
158 | (geom_types == "MultiLineString")
159 )
160 point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint"))
161 geomcoll_idx = np.asarray((geom_types == "GeometryCollection"))
162
163 if point_idx.any():
164 point_gdf = _clip_points(gdf[point_idx], poly)
165 else:
166 point_gdf = None
167
168 if poly_idx.any():
169 poly_gdf = _clip_line_poly(gdf[poly_idx], poly)
170 else:
171 poly_gdf = None
172
173 if line_idx.any():
174 line_gdf = _clip_line_poly(gdf[line_idx], poly)
175 else:
176 line_gdf = None
177
178 if geomcoll_idx.any():
179 geomcoll_gdf = _clip_line_poly(gdf[geomcoll_idx], poly)
180 else:
181 geomcoll_gdf = None
182
183 order = pd.Series(range(len(gdf)), index=gdf.index)
184 concat = pd.concat([point_gdf, line_gdf, poly_gdf, geomcoll_gdf])
185
186 if keep_geom_type:
187 geomcoll_concat = (concat.geom_type == "GeometryCollection").any()
188 geomcoll_orig = geomcoll_idx.any()
189
190 new_collection = geomcoll_concat and not geomcoll_orig
191
192 if geomcoll_orig:
193 warnings.warn(
194 "keep_geom_type can not be called on a "
195 "GeoDataFrame with GeometryCollection."
196 )
197 else:
198 polys = ["Polygon", "MultiPolygon"]
199 lines = ["LineString", "MultiLineString", "LinearRing"]
200 points = ["Point", "MultiPoint"]
201
202 # Check that the gdf for multiple geom types (points, lines and/or polys)
203 orig_types_total = sum(
204 [
205 gdf.geom_type.isin(polys).any(),
206 gdf.geom_type.isin(lines).any(),
207 gdf.geom_type.isin(points).any(),
208 ]
209 )
210
211 # Check how many geometry types are in the clipped GeoDataFrame
212 clip_types_total = sum(
213 [
214 concat.geom_type.isin(polys).any(),
215 concat.geom_type.isin(lines).any(),
216 concat.geom_type.isin(points).any(),
217 ]
218 )
219
220 # Check there aren't any new geom types in the clipped GeoDataFrame
221 more_types = orig_types_total < clip_types_total
222
223 if orig_types_total > 1:
224 warnings.warn(
225 "keep_geom_type can not be called on a mixed type GeoDataFrame."
226 )
227 elif new_collection or more_types:
228 orig_type = gdf.geom_type.iloc[0]
229 if new_collection:
230 concat = concat.explode()
231 if orig_type in polys:
232 concat = concat.loc[concat.geom_type.isin(polys)]
233 elif orig_type in lines:
234 concat = concat.loc[concat.geom_type.isin(lines)]
235
236 # Return empty GeoDataFrame or GeoSeries if no shapes remain
237 if len(concat) == 0:
238 return gdf.iloc[:0]
239
240 # Preserve the original order of the input
241 if isinstance(concat, GeoDataFrame):
242 concat["_order"] = order
243 return concat.sort_values(by="_order").drop(columns="_order")
244 else:
245 concat = GeoDataFrame(geometry=concat)
246 concat["_order"] = order
247 return concat.sort_values(by="_order").geometry
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geopandas/tools/clip.py b/geopandas/tools/clip.py
--- a/geopandas/tools/clip.py
+++ b/geopandas/tools/clip.py
@@ -66,7 +66,7 @@
# Clip the data with the polygon
if isinstance(gdf_sub, GeoDataFrame):
clipped = gdf_sub.copy()
- clipped["geometry"] = gdf_sub.intersection(poly)
+ clipped[gdf.geometry.name] = gdf_sub.intersection(poly)
else:
# GeoSeries
clipped = gdf_sub.intersection(poly)
| {"golden_diff": "diff --git a/geopandas/tools/clip.py b/geopandas/tools/clip.py\n--- a/geopandas/tools/clip.py\n+++ b/geopandas/tools/clip.py\n@@ -66,7 +66,7 @@\n # Clip the data with the polygon\n if isinstance(gdf_sub, GeoDataFrame):\n clipped = gdf_sub.copy()\n- clipped[\"geometry\"] = gdf_sub.intersection(poly)\n+ clipped[gdf.geometry.name] = gdf_sub.intersection(poly)\n else:\n # GeoSeries\n clipped = gdf_sub.intersection(poly)\n", "issue": "BUG: the clip function don't take dynamically the geometry column name \nHi, today i noticed that the clip function didn't work for me but then i found which was the problem.\r\n\r\nImport geodataframes with geometry called \"WKT\":\r\n```\r\nfield_map = pd.read_csv('./field_map.csv')\r\nfield_map['WKT'] = field_map['WKT'].apply(wkt.loads)\r\nfield_map = gpd.GeoDataFrame(field_map, geometry = 'WKT', crs = {'init': 'epsg:4326'})\r\n\r\nprint(field_map.columns)\r\n\r\nboundary_map = pd.read_csv('./boundary_map.csv')\r\nboundary_map['WKT'] = boundary_map['WKT'].apply(wkt.loads)\r\nboundary_map = gpd.GeoDataFrame(boundary_map, geometry = 'WKT', crs = {'init': 'epsg:4326'})\r\n\r\nprint(boundary_map.columns)\r\n\r\n> Index(['Unnamed: 0', 'IDX', 'Value', 'WKT', 'WKTTypeID', 'IDXmaster'], dtype='object')\r\n> Index(['Unnamed: 0', 'WKT'], dtype='object')\r\n\r\n```\r\nClip the map and plot to validate:\r\n```\r\nclip_map = gpd.clip(field_map, boundary_map)\r\n\r\nfig, ax = plt.subplots(figsize=(10,10))\r\nclip_map.plot(ax=ax)\r\nboundary_map.geometry.boundary.plot(ax=ax, color='red')\r\n```\r\n\r\n\r\nit seems that the clip has not worked but if we look at the of clip_map columns we see \"WKT\" and \"geometry\"\r\n\r\n\r\n\r\n\r\n**SOLUTION:**\r\nThis worked for me, renaming the geometry column as \"geometry\"\r\n\r\n```\r\nfield_map = field_map.rename_geometry('geometry')\r\nboundary_map = boundary_map.rename_geometry('geometry')\r\n\r\nclip_map = gpd.clip(field_map, boundary_map)\r\n\r\nfig, ax = plt.subplots(figsize=(10,10))\r\nclip_map.plot(ax=ax)\r\nboundary_map.geometry.boundary.plot(ax=ax, color='red')\r\n```\r\n\r\n\r\n\r\nThe clip function now work correctly\r\nRegards\r\n\r\n\n", "before_files": [{"content": "\"\"\"\ngeopandas.clip\n==============\n\nA module to clip vector data using GeoPandas.\n\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely.geometry import Polygon, MultiPolygon\n\nfrom geopandas import GeoDataFrame, GeoSeries\nfrom geopandas.array import _check_crs, _crs_mismatch_warn\n\n\ndef _clip_points(gdf, poly):\n \"\"\"Clip point geometry to the polygon extent.\n\n Clip an input point GeoDataFrame to the polygon extent of the poly\n parameter. Points that intersect the poly geometry are extracted with\n associated attributes and returned.\n\n Parameters\n ----------\n gdf : GeoDataFrame, GeoSeries\n Composed of point geometry that will be clipped to the poly.\n\n poly : (Multi)Polygon\n Reference geometry used to spatially clip the data.\n\n Returns\n -------\n GeoDataFrame\n The returned GeoDataFrame is a subset of gdf that intersects\n with poly.\n \"\"\"\n return gdf.iloc[gdf.sindex.query(poly, predicate=\"intersects\")]\n\n\ndef _clip_line_poly(gdf, poly):\n \"\"\"Clip line and polygon geometry to the polygon extent.\n\n Clip an input line or polygon to the polygon extent of the poly\n parameter. 
Parts of Lines or Polygons that intersect the poly geometry are\n extracted with associated attributes and returned.\n\n Parameters\n ----------\n gdf : GeoDataFrame, GeoSeries\n Line or polygon geometry that is clipped to poly.\n\n poly : (Multi)Polygon\n Reference polygon for clipping.\n\n Returns\n -------\n GeoDataFrame\n The returned GeoDataFrame is a clipped subset of gdf\n that intersects with poly.\n \"\"\"\n gdf_sub = gdf.iloc[gdf.sindex.query(poly, predicate=\"intersects\")]\n\n # Clip the data with the polygon\n if isinstance(gdf_sub, GeoDataFrame):\n clipped = gdf_sub.copy()\n clipped[\"geometry\"] = gdf_sub.intersection(poly)\n else:\n # GeoSeries\n clipped = gdf_sub.intersection(poly)\n\n return clipped\n\n\ndef clip(gdf, mask, keep_geom_type=False):\n \"\"\"Clip points, lines, or polygon geometries to the mask extent.\n\n Both layers must be in the same Coordinate Reference System (CRS).\n The `gdf` will be clipped to the full extent of the clip object.\n\n If there are multiple polygons in mask, data from `gdf` will be\n clipped to the total boundary of all polygons in mask.\n\n Parameters\n ----------\n gdf : GeoDataFrame or GeoSeries\n Vector layer (point, line, polygon) to be clipped to mask.\n mask : GeoDataFrame, GeoSeries, (Multi)Polygon\n Polygon vector layer used to clip `gdf`.\n The mask's geometry is dissolved into one geometric feature\n and intersected with `gdf`.\n keep_geom_type : boolean, default False\n If True, return only geometries of original type in case of intersection\n resulting in multiple geometry types or GeometryCollections.\n If False, return all resulting geometries (potentially mixed-types).\n\n Returns\n -------\n GeoDataFrame or GeoSeries\n Vector data (points, lines, polygons) from `gdf` clipped to\n polygon boundary from mask.\n\n Examples\n --------\n Clip points (global cities) with a polygon (the South American continent):\n\n >>> import geopandas\n >>> path =\n >>> world = geopandas.read_file(\n ... geopandas.datasets.get_path('naturalearth_lowres'))\n >>> south_america = world[world['continent'] == \"South America\"]\n >>> capitals = geopandas.read_file(\n ... 
geopandas.datasets.get_path('naturalearth_cities'))\n >>> capitals.shape\n (202, 2)\n >>> sa_capitals = geopandas.clip(capitals, south_america)\n >>> sa_capitals.shape\n (12, 2)\n \"\"\"\n if not isinstance(gdf, (GeoDataFrame, GeoSeries)):\n raise TypeError(\n \"'gdf' should be GeoDataFrame or GeoSeries, got {}\".format(type(gdf))\n )\n\n if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon)):\n raise TypeError(\n \"'mask' should be GeoDataFrame, GeoSeries or\"\n \"(Multi)Polygon, got {}\".format(type(gdf))\n )\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n if not _check_crs(gdf, mask):\n _crs_mismatch_warn(gdf, mask, stacklevel=3)\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n box_mask = mask.total_bounds\n else:\n box_mask = mask.bounds\n box_gdf = gdf.total_bounds\n if not (\n ((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))\n and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))\n ):\n return gdf.iloc[:0]\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n poly = mask.geometry.unary_union\n else:\n poly = mask\n\n geom_types = gdf.geometry.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"LinearRing\")\n | (geom_types == \"MultiLineString\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n geomcoll_idx = np.asarray((geom_types == \"GeometryCollection\"))\n\n if point_idx.any():\n point_gdf = _clip_points(gdf[point_idx], poly)\n else:\n point_gdf = None\n\n if poly_idx.any():\n poly_gdf = _clip_line_poly(gdf[poly_idx], poly)\n else:\n poly_gdf = None\n\n if line_idx.any():\n line_gdf = _clip_line_poly(gdf[line_idx], poly)\n else:\n line_gdf = None\n\n if geomcoll_idx.any():\n geomcoll_gdf = _clip_line_poly(gdf[geomcoll_idx], poly)\n else:\n geomcoll_gdf = None\n\n order = pd.Series(range(len(gdf)), index=gdf.index)\n concat = pd.concat([point_gdf, line_gdf, poly_gdf, geomcoll_gdf])\n\n if keep_geom_type:\n geomcoll_concat = (concat.geom_type == \"GeometryCollection\").any()\n geomcoll_orig = geomcoll_idx.any()\n\n new_collection = geomcoll_concat and not geomcoll_orig\n\n if geomcoll_orig:\n warnings.warn(\n \"keep_geom_type can not be called on a \"\n \"GeoDataFrame with GeometryCollection.\"\n )\n else:\n polys = [\"Polygon\", \"MultiPolygon\"]\n lines = [\"LineString\", \"MultiLineString\", \"LinearRing\"]\n points = [\"Point\", \"MultiPoint\"]\n\n # Check that the gdf for multiple geom types (points, lines and/or polys)\n orig_types_total = sum(\n [\n gdf.geom_type.isin(polys).any(),\n gdf.geom_type.isin(lines).any(),\n gdf.geom_type.isin(points).any(),\n ]\n )\n\n # Check how many geometry types are in the clipped GeoDataFrame\n clip_types_total = sum(\n [\n concat.geom_type.isin(polys).any(),\n concat.geom_type.isin(lines).any(),\n concat.geom_type.isin(points).any(),\n ]\n )\n\n # Check there aren't any new geom types in the clipped GeoDataFrame\n more_types = orig_types_total < clip_types_total\n\n if orig_types_total > 1:\n warnings.warn(\n \"keep_geom_type can not be called on a mixed type GeoDataFrame.\"\n )\n elif new_collection or more_types:\n orig_type = gdf.geom_type.iloc[0]\n if new_collection:\n concat = concat.explode()\n if orig_type in polys:\n concat = concat.loc[concat.geom_type.isin(polys)]\n elif orig_type in lines:\n concat = concat.loc[concat.geom_type.isin(lines)]\n\n # Return empty GeoDataFrame or GeoSeries if no shapes remain\n if 
len(concat) == 0:\n return gdf.iloc[:0]\n\n # Preserve the original order of the input\n if isinstance(concat, GeoDataFrame):\n concat[\"_order\"] = order\n return concat.sort_values(by=\"_order\").drop(columns=\"_order\")\n else:\n concat = GeoDataFrame(geometry=concat)\n concat[\"_order\"] = order\n return concat.sort_values(by=\"_order\").geometry\n", "path": "geopandas/tools/clip.py"}], "after_files": [{"content": "\"\"\"\ngeopandas.clip\n==============\n\nA module to clip vector data using GeoPandas.\n\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely.geometry import Polygon, MultiPolygon\n\nfrom geopandas import GeoDataFrame, GeoSeries\nfrom geopandas.array import _check_crs, _crs_mismatch_warn\n\n\ndef _clip_points(gdf, poly):\n \"\"\"Clip point geometry to the polygon extent.\n\n Clip an input point GeoDataFrame to the polygon extent of the poly\n parameter. Points that intersect the poly geometry are extracted with\n associated attributes and returned.\n\n Parameters\n ----------\n gdf : GeoDataFrame, GeoSeries\n Composed of point geometry that will be clipped to the poly.\n\n poly : (Multi)Polygon\n Reference geometry used to spatially clip the data.\n\n Returns\n -------\n GeoDataFrame\n The returned GeoDataFrame is a subset of gdf that intersects\n with poly.\n \"\"\"\n return gdf.iloc[gdf.sindex.query(poly, predicate=\"intersects\")]\n\n\ndef _clip_line_poly(gdf, poly):\n \"\"\"Clip line and polygon geometry to the polygon extent.\n\n Clip an input line or polygon to the polygon extent of the poly\n parameter. Parts of Lines or Polygons that intersect the poly geometry are\n extracted with associated attributes and returned.\n\n Parameters\n ----------\n gdf : GeoDataFrame, GeoSeries\n Line or polygon geometry that is clipped to poly.\n\n poly : (Multi)Polygon\n Reference polygon for clipping.\n\n Returns\n -------\n GeoDataFrame\n The returned GeoDataFrame is a clipped subset of gdf\n that intersects with poly.\n \"\"\"\n gdf_sub = gdf.iloc[gdf.sindex.query(poly, predicate=\"intersects\")]\n\n # Clip the data with the polygon\n if isinstance(gdf_sub, GeoDataFrame):\n clipped = gdf_sub.copy()\n clipped[gdf.geometry.name] = gdf_sub.intersection(poly)\n else:\n # GeoSeries\n clipped = gdf_sub.intersection(poly)\n\n return clipped\n\n\ndef clip(gdf, mask, keep_geom_type=False):\n \"\"\"Clip points, lines, or polygon geometries to the mask extent.\n\n Both layers must be in the same Coordinate Reference System (CRS).\n The `gdf` will be clipped to the full extent of the clip object.\n\n If there are multiple polygons in mask, data from `gdf` will be\n clipped to the total boundary of all polygons in mask.\n\n Parameters\n ----------\n gdf : GeoDataFrame or GeoSeries\n Vector layer (point, line, polygon) to be clipped to mask.\n mask : GeoDataFrame, GeoSeries, (Multi)Polygon\n Polygon vector layer used to clip `gdf`.\n The mask's geometry is dissolved into one geometric feature\n and intersected with `gdf`.\n keep_geom_type : boolean, default False\n If True, return only geometries of original type in case of intersection\n resulting in multiple geometry types or GeometryCollections.\n If False, return all resulting geometries (potentially mixed-types).\n\n Returns\n -------\n GeoDataFrame or GeoSeries\n Vector data (points, lines, polygons) from `gdf` clipped to\n polygon boundary from mask.\n\n Examples\n --------\n Clip points (global cities) with a polygon (the South American continent):\n\n >>> import geopandas\n >>> path =\n >>> world = 
geopandas.read_file(\n ... geopandas.datasets.get_path('naturalearth_lowres'))\n >>> south_america = world[world['continent'] == \"South America\"]\n >>> capitals = geopandas.read_file(\n ... geopandas.datasets.get_path('naturalearth_cities'))\n >>> capitals.shape\n (202, 2)\n >>> sa_capitals = geopandas.clip(capitals, south_america)\n >>> sa_capitals.shape\n (12, 2)\n \"\"\"\n if not isinstance(gdf, (GeoDataFrame, GeoSeries)):\n raise TypeError(\n \"'gdf' should be GeoDataFrame or GeoSeries, got {}\".format(type(gdf))\n )\n\n if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon)):\n raise TypeError(\n \"'mask' should be GeoDataFrame, GeoSeries or\"\n \"(Multi)Polygon, got {}\".format(type(gdf))\n )\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n if not _check_crs(gdf, mask):\n _crs_mismatch_warn(gdf, mask, stacklevel=3)\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n box_mask = mask.total_bounds\n else:\n box_mask = mask.bounds\n box_gdf = gdf.total_bounds\n if not (\n ((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))\n and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))\n ):\n return gdf.iloc[:0]\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n poly = mask.geometry.unary_union\n else:\n poly = mask\n\n geom_types = gdf.geometry.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"LinearRing\")\n | (geom_types == \"MultiLineString\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n geomcoll_idx = np.asarray((geom_types == \"GeometryCollection\"))\n\n if point_idx.any():\n point_gdf = _clip_points(gdf[point_idx], poly)\n else:\n point_gdf = None\n\n if poly_idx.any():\n poly_gdf = _clip_line_poly(gdf[poly_idx], poly)\n else:\n poly_gdf = None\n\n if line_idx.any():\n line_gdf = _clip_line_poly(gdf[line_idx], poly)\n else:\n line_gdf = None\n\n if geomcoll_idx.any():\n geomcoll_gdf = _clip_line_poly(gdf[geomcoll_idx], poly)\n else:\n geomcoll_gdf = None\n\n order = pd.Series(range(len(gdf)), index=gdf.index)\n concat = pd.concat([point_gdf, line_gdf, poly_gdf, geomcoll_gdf])\n\n if keep_geom_type:\n geomcoll_concat = (concat.geom_type == \"GeometryCollection\").any()\n geomcoll_orig = geomcoll_idx.any()\n\n new_collection = geomcoll_concat and not geomcoll_orig\n\n if geomcoll_orig:\n warnings.warn(\n \"keep_geom_type can not be called on a \"\n \"GeoDataFrame with GeometryCollection.\"\n )\n else:\n polys = [\"Polygon\", \"MultiPolygon\"]\n lines = [\"LineString\", \"MultiLineString\", \"LinearRing\"]\n points = [\"Point\", \"MultiPoint\"]\n\n # Check that the gdf for multiple geom types (points, lines and/or polys)\n orig_types_total = sum(\n [\n gdf.geom_type.isin(polys).any(),\n gdf.geom_type.isin(lines).any(),\n gdf.geom_type.isin(points).any(),\n ]\n )\n\n # Check how many geometry types are in the clipped GeoDataFrame\n clip_types_total = sum(\n [\n concat.geom_type.isin(polys).any(),\n concat.geom_type.isin(lines).any(),\n concat.geom_type.isin(points).any(),\n ]\n )\n\n # Check there aren't any new geom types in the clipped GeoDataFrame\n more_types = orig_types_total < clip_types_total\n\n if orig_types_total > 1:\n warnings.warn(\n \"keep_geom_type can not be called on a mixed type GeoDataFrame.\"\n )\n elif new_collection or more_types:\n orig_type = gdf.geom_type.iloc[0]\n if new_collection:\n concat = concat.explode()\n if orig_type in polys:\n concat = 
concat.loc[concat.geom_type.isin(polys)]\n elif orig_type in lines:\n concat = concat.loc[concat.geom_type.isin(lines)]\n\n # Return empty GeoDataFrame or GeoSeries if no shapes remain\n if len(concat) == 0:\n return gdf.iloc[:0]\n\n # Preserve the original order of the input\n if isinstance(concat, GeoDataFrame):\n concat[\"_order\"] = order\n return concat.sort_values(by=\"_order\").drop(columns=\"_order\")\n else:\n concat = GeoDataFrame(geometry=concat)\n concat[\"_order\"] = order\n return concat.sort_values(by=\"_order\").geometry\n", "path": "geopandas/tools/clip.py"}]} | 3,407 | 124 |
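As a quick cross-check of the clip fix above, the sketch below clips a GeoDataFrame whose geometry column is named "WKT" rather than "geometry"; with the patched `_clip_line_poly` the result keeps "WKT" as its active geometry column instead of gaining a stray "geometry" column. The column name and geometries are illustrative only and assume a geopandas build that includes this patch.

```python
# Minimal sketch, assuming geopandas with the patch above is installed.
# "WKT" is a hypothetical geometry column name used only for illustration.
import geopandas as gpd
from shapely.geometry import Polygon

gdf = gpd.GeoDataFrame(
    {"value": [1]},
    geometry=[Polygon([(0, 0), (0, 3), (3, 3), (3, 0)])],
).rename_geometry("WKT")
mask = Polygon([(1, 1), (1, 5), (5, 5), (5, 1)])

clipped = gpd.clip(gdf, mask)
# Expected with the fix: the geometry column is still called "WKT" and the
# frame has no extra "geometry" column.
print(clipped.geometry.name)     # "WKT"
print(clipped.columns.tolist())  # ["value", "WKT"]
```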
gh_patches_debug_2043 | rasdani/github-patches | git_diff | boto__boto-215 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RDS call modify_dbinstance with multi_az = True doesn't actually set an instance to MultiAZ
Calling modify_dbinstance on a non-Multi-AZ instance with multi_az=True doesn't actually switch the parameter. I assume this is also true when creating an instance from scratch, but I haven't tested that yet.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `boto/emr/connection.py`
Content:
```
1 # Copyright (c) 2010 Spotify AB
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the
5 # "Software"), to deal in the Software without restriction, including
6 # without limitation the rights to use, copy, modify, merge, publish, dis-
7 # tribute, sublicense, and/or sell copies of the Software, and to permit
8 # persons to whom the Software is furnished to do so, subject to the fol-
9 # lowing conditions:
10 #
11 # The above copyright notice and this permission notice shall be included
12 # in all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
17 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 # IN THE SOFTWARE.
21
22 """
23 Represents a connection to the EMR service
24 """
25 import types
26
27 import boto
28 from boto.ec2.regioninfo import RegionInfo
29 from boto.emr.emrobject import JobFlow, RunJobFlowResponse
30 from boto.emr.step import JarStep
31 from boto.connection import AWSQueryConnection
32 from boto.exception import EmrResponseError
33
34 class EmrConnection(AWSQueryConnection):
35
36 APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
37 DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
38 DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
39 'elasticmapreduce.amazonaws.com')
40 ResponseError = EmrResponseError
41
42 # Constants for AWS Console debugging
43 DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
44 DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'
45
46 def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
47 is_secure=True, port=None, proxy=None, proxy_port=None,
48 proxy_user=None, proxy_pass=None, debug=0,
49 https_connection_factory=None, region=None, path='/'):
50 if not region:
51 region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)
52 self.region = region
53 AWSQueryConnection.__init__(self, aws_access_key_id,
54 aws_secret_access_key,
55 is_secure, port, proxy, proxy_port,
56 proxy_user, proxy_pass,
57 self.region.endpoint, debug,
58 https_connection_factory, path)
59
60 def _required_auth_capability(self):
61 return ['emr']
62
63 def describe_jobflow(self, jobflow_id):
64 """
65 Describes a single Elastic MapReduce job flow
66
67 :type jobflow_id: str
68 :param jobflow_id: The job flow id of interest
69 """
70 jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
71 if jobflows:
72 return jobflows[0]
73
74 def describe_jobflows(self, states=None, jobflow_ids=None,
75 created_after=None, created_before=None):
76 """
77 Retrieve all the Elastic MapReduce job flows on your account
78
79 :type states: list
80 :param states: A list of strings with job flow states wanted
81
82 :type jobflow_ids: list
83 :param jobflow_ids: A list of job flow IDs
84 :type created_after: datetime
85 :param created_after: Bound on job flow creation time
86
87 :type created_before: datetime
88 :param created_before: Bound on job flow creation time
89 """
90 params = {}
91
92 if states:
93 self.build_list_params(params, states, 'JobFlowStates.member')
94 if jobflow_ids:
95 self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
96 if created_after:
97 params['CreatedAfter'] = created_after.strftime('%Y-%m-%dT%H:%M:%S')
98 if created_before:
99 params['CreatedBefore'] = created_before.strftime('%Y-%m-%dT%H:%M:%S')
100
101 return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])
102
103 def terminate_jobflow(self, jobflow_id):
104 """
105 Terminate an Elastic MapReduce job flow
106
107 :type jobflow_id: str
108 :param jobflow_id: A jobflow id
109 """
110 self.terminate_jobflows([jobflow_id])
111
112 def terminate_jobflows(self, jobflow_ids):
113 """
114 Terminate an Elastic MapReduce job flow
115
116 :type jobflow_ids: list
117 :param jobflow_ids: A list of job flow IDs
118 """
119 params = {}
120 self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
121 return self.get_status('TerminateJobFlows', params)
122
123 def add_jobflow_steps(self, jobflow_id, steps):
124 """
125 Adds steps to a jobflow
126
127 :type jobflow_id: str
128 :param jobflow_id: The job flow id
129 :type steps: list(boto.emr.Step)
130 :param steps: A list of steps to add to the job
131 """
132 if type(steps) != types.ListType:
133 steps = [steps]
134 params = {}
135 params['JobFlowId'] = jobflow_id
136
137 # Step args
138 step_args = [self._build_step_args(step) for step in steps]
139 params.update(self._build_step_list(step_args))
140
141 return self.get_object('AddJobFlowSteps', params, RunJobFlowResponse)
142
143 def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None,
144 master_instance_type='m1.small',
145 slave_instance_type='m1.small', num_instances=1,
146 action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
147 enable_debugging=False,
148 hadoop_version='0.18',
149 steps=[],
150 bootstrap_actions=[]):
151 """
152 Runs a job flow
153
154 :type name: str
155 :param name: Name of the job flow
156 :type log_uri: str
157 :param log_uri: URI of the S3 bucket to place logs
158 :type ec2_keyname: str
159 :param ec2_keyname: EC2 key used for the instances
160 :type availability_zone: str
161 :param availability_zone: EC2 availability zone of the cluster
162 :type master_instance_type: str
163 :param master_instance_type: EC2 instance type of the master
164 :type slave_instance_type: str
165 :param slave_instance_type: EC2 instance type of the slave nodes
166 :type num_instances: int
167 :param num_instances: Number of instances in the Hadoop cluster
168 :type action_on_failure: str
169 :param action_on_failure: Action to take if a step terminates
170 :type keep_alive: bool
171 :param keep_alive: Denotes whether the cluster should stay alive upon completion
172 :type enable_debugging: bool
173 :param enable_debugging: Denotes whether AWS console debugging should be enabled.
174 :type steps: list(boto.emr.Step)
175 :param steps: List of steps to add with the job
176
177 :rtype: str
178 :return: The jobflow id
179 """
180 params = {}
181 if action_on_failure:
182 params['ActionOnFailure'] = action_on_failure
183 params['Name'] = name
184 params['LogUri'] = log_uri
185
186 # Instance args
187 instance_params = self._build_instance_args(ec2_keyname, availability_zone,
188 master_instance_type, slave_instance_type,
189 num_instances, keep_alive, hadoop_version)
190 params.update(instance_params)
191
192 # Debugging step from EMR API docs
193 if enable_debugging:
194 debugging_step = JarStep(name='Setup Hadoop Debugging',
195 action_on_failure='TERMINATE_JOB_FLOW',
196 main_class=None,
197 jar=self.DebuggingJar,
198 step_args=self.DebuggingArgs)
199 steps.insert(0, debugging_step)
200
201 # Step args
202 if steps:
203 step_args = [self._build_step_args(step) for step in steps]
204 params.update(self._build_step_list(step_args))
205
206 if bootstrap_actions:
207 bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]
208 params.update(self._build_bootstrap_action_list(bootstrap_action_args))
209
210 response = self.get_object('RunJobFlow', params, RunJobFlowResponse)
211 return response.jobflowid
212
213 def _build_bootstrap_action_args(self, bootstrap_action):
214 bootstrap_action_params = {}
215 bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path
216
217 try:
218 bootstrap_action_params['Name'] = bootstrap_action.name
219 except AttributeError:
220 pass
221
222 args = bootstrap_action.args()
223 if args:
224 self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')
225
226 return bootstrap_action_params
227
228 def _build_step_args(self, step):
229 step_params = {}
230 step_params['ActionOnFailure'] = step.action_on_failure
231 step_params['HadoopJarStep.Jar'] = step.jar()
232
233 main_class = step.main_class()
234 if main_class:
235 step_params['HadoopJarStep.MainClass'] = main_class
236
237 args = step.args()
238 if args:
239 self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')
240
241 step_params['Name'] = step.name
242 return step_params
243
244 def _build_bootstrap_action_list(self, bootstrap_actions):
245 if type(bootstrap_actions) != types.ListType:
246 bootstrap_actions = [bootstrap_actions]
247
248 params = {}
249 for i, bootstrap_action in enumerate(bootstrap_actions):
250 for key, value in bootstrap_action.iteritems():
251 params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value
252 return params
253
254 def _build_step_list(self, steps):
255 if type(steps) != types.ListType:
256 steps = [steps]
257
258 params = {}
259 for i, step in enumerate(steps):
260 for key, value in step.iteritems():
261 params['Steps.member.%s.%s' % (i+1, key)] = value
262 return params
263
264 def _build_instance_args(self, ec2_keyname, availability_zone, master_instance_type,
265 slave_instance_type, num_instances, keep_alive, hadoop_version):
266 params = {
267 'Instances.MasterInstanceType' : master_instance_type,
268 'Instances.SlaveInstanceType' : slave_instance_type,
269 'Instances.InstanceCount' : num_instances,
270 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),
271 'Instances.HadoopVersion' : hadoop_version
272 }
273
274 if ec2_keyname:
275 params['Instances.Ec2KeyName'] = ec2_keyname
276 if availability_zone:
277 params['Placement'] = availability_zone
278
279 return params
280
281
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/boto/emr/connection.py b/boto/emr/connection.py
--- a/boto/emr/connection.py
+++ b/boto/emr/connection.py
@@ -274,7 +274,7 @@
if ec2_keyname:
params['Instances.Ec2KeyName'] = ec2_keyname
if availability_zone:
- params['Placement'] = availability_zone
+ params['Placement.AvailabilityZone'] = availability_zone
return params
| {"golden_diff": "diff --git a/boto/emr/connection.py b/boto/emr/connection.py\n--- a/boto/emr/connection.py\n+++ b/boto/emr/connection.py\n@@ -274,7 +274,7 @@\n if ec2_keyname:\n params['Instances.Ec2KeyName'] = ec2_keyname\n if availability_zone:\n- params['Placement'] = availability_zone\n+ params['Placement.AvailabilityZone'] = availability_zone\n \n return params\n", "issue": "RDS call modify_dbinstance with multi_az = True doesn't actually set an instance to MultiAZ\nMaking a call to a non-multiaz instance with multi_az=True doesn't actually switch the parameter. I assume this is also true for creating one from scratch, but I haven't tested that yet.\n\n", "before_files": [{"content": "# Copyright (c) 2010 Spotify AB\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a connection to the EMR service\n\"\"\"\nimport types\n\nimport boto\nfrom boto.ec2.regioninfo import RegionInfo\nfrom boto.emr.emrobject import JobFlow, RunJobFlowResponse\nfrom boto.emr.step import JarStep\nfrom boto.connection import AWSQueryConnection\nfrom boto.exception import EmrResponseError\n\nclass EmrConnection(AWSQueryConnection):\n\n APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')\n DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')\n DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',\n 'elasticmapreduce.amazonaws.com')\n ResponseError = EmrResponseError\n\n # Constants for AWS Console debugging\n DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'\n DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'\n\n def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n is_secure=True, port=None, proxy=None, proxy_port=None,\n proxy_user=None, proxy_pass=None, debug=0,\n https_connection_factory=None, region=None, path='/'):\n if not region:\n region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)\n self.region = region\n AWSQueryConnection.__init__(self, aws_access_key_id,\n aws_secret_access_key,\n is_secure, port, proxy, proxy_port,\n proxy_user, proxy_pass,\n self.region.endpoint, debug,\n https_connection_factory, path)\n\n def _required_auth_capability(self):\n return ['emr']\n\n def describe_jobflow(self, jobflow_id):\n \"\"\"\n Describes a single Elastic MapReduce job flow\n\n :type jobflow_id: str\n :param jobflow_id: The job flow id of interest\n \"\"\"\n jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])\n if 
jobflows:\n return jobflows[0]\n\n def describe_jobflows(self, states=None, jobflow_ids=None,\n created_after=None, created_before=None):\n \"\"\"\n Retrieve all the Elastic MapReduce job flows on your account\n\n :type states: list\n :param states: A list of strings with job flow states wanted\n\n :type jobflow_ids: list\n :param jobflow_ids: A list of job flow IDs\n :type created_after: datetime\n :param created_after: Bound on job flow creation time\n\n :type created_before: datetime\n :param created_before: Bound on job flow creation time\n \"\"\"\n params = {}\n\n if states:\n self.build_list_params(params, states, 'JobFlowStates.member')\n if jobflow_ids:\n self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')\n if created_after:\n params['CreatedAfter'] = created_after.strftime('%Y-%m-%dT%H:%M:%S')\n if created_before:\n params['CreatedBefore'] = created_before.strftime('%Y-%m-%dT%H:%M:%S')\n\n return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])\n\n def terminate_jobflow(self, jobflow_id):\n \"\"\"\n Terminate an Elastic MapReduce job flow\n\n :type jobflow_id: str\n :param jobflow_id: A jobflow id \n \"\"\"\n self.terminate_jobflows([jobflow_id]) \n\n def terminate_jobflows(self, jobflow_ids):\n \"\"\"\n Terminate an Elastic MapReduce job flow\n\n :type jobflow_ids: list\n :param jobflow_ids: A list of job flow IDs\n \"\"\"\n params = {}\n self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')\n return self.get_status('TerminateJobFlows', params)\n\n def add_jobflow_steps(self, jobflow_id, steps):\n \"\"\"\n Adds steps to a jobflow\n\n :type jobflow_id: str\n :param jobflow_id: The job flow id\n :type steps: list(boto.emr.Step)\n :param steps: A list of steps to add to the job\n \"\"\"\n if type(steps) != types.ListType:\n steps = [steps]\n params = {}\n params['JobFlowId'] = jobflow_id\n\n # Step args\n step_args = [self._build_step_args(step) for step in steps]\n params.update(self._build_step_list(step_args))\n\n return self.get_object('AddJobFlowSteps', params, RunJobFlowResponse)\n\n def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None,\n master_instance_type='m1.small',\n slave_instance_type='m1.small', num_instances=1,\n action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,\n enable_debugging=False,\n hadoop_version='0.18',\n steps=[],\n bootstrap_actions=[]):\n \"\"\"\n Runs a job flow\n\n :type name: str\n :param name: Name of the job flow\n :type log_uri: str\n :param log_uri: URI of the S3 bucket to place logs\n :type ec2_keyname: str\n :param ec2_keyname: EC2 key used for the instances\n :type availability_zone: str\n :param availability_zone: EC2 availability zone of the cluster\n :type master_instance_type: str\n :param master_instance_type: EC2 instance type of the master\n :type slave_instance_type: str\n :param slave_instance_type: EC2 instance type of the slave nodes\n :type num_instances: int\n :param num_instances: Number of instances in the Hadoop cluster\n :type action_on_failure: str\n :param action_on_failure: Action to take if a step terminates\n :type keep_alive: bool\n :param keep_alive: Denotes whether the cluster should stay alive upon completion\n :type enable_debugging: bool\n :param enable_debugging: Denotes whether AWS console debugging should be enabled.\n :type steps: list(boto.emr.Step)\n :param steps: List of steps to add with the job\n\n :rtype: str\n :return: The jobflow id\n \"\"\"\n params = {}\n if action_on_failure:\n params['ActionOnFailure'] = action_on_failure\n 
params['Name'] = name\n params['LogUri'] = log_uri\n\n # Instance args\n instance_params = self._build_instance_args(ec2_keyname, availability_zone,\n master_instance_type, slave_instance_type,\n num_instances, keep_alive, hadoop_version)\n params.update(instance_params)\n\n # Debugging step from EMR API docs\n if enable_debugging:\n debugging_step = JarStep(name='Setup Hadoop Debugging',\n action_on_failure='TERMINATE_JOB_FLOW',\n main_class=None,\n jar=self.DebuggingJar,\n step_args=self.DebuggingArgs)\n steps.insert(0, debugging_step)\n\n # Step args\n if steps:\n step_args = [self._build_step_args(step) for step in steps]\n params.update(self._build_step_list(step_args))\n\n if bootstrap_actions:\n bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]\n params.update(self._build_bootstrap_action_list(bootstrap_action_args))\n\n response = self.get_object('RunJobFlow', params, RunJobFlowResponse)\n return response.jobflowid\n\n def _build_bootstrap_action_args(self, bootstrap_action):\n bootstrap_action_params = {}\n bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path\n\n try:\n bootstrap_action_params['Name'] = bootstrap_action.name\n except AttributeError:\n pass\n\n args = bootstrap_action.args()\n if args:\n self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')\n\n return bootstrap_action_params\n\n def _build_step_args(self, step):\n step_params = {}\n step_params['ActionOnFailure'] = step.action_on_failure\n step_params['HadoopJarStep.Jar'] = step.jar()\n\n main_class = step.main_class()\n if main_class:\n step_params['HadoopJarStep.MainClass'] = main_class\n\n args = step.args()\n if args:\n self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')\n\n step_params['Name'] = step.name\n return step_params\n\n def _build_bootstrap_action_list(self, bootstrap_actions):\n if type(bootstrap_actions) != types.ListType:\n bootstrap_actions = [bootstrap_actions]\n\n params = {}\n for i, bootstrap_action in enumerate(bootstrap_actions):\n for key, value in bootstrap_action.iteritems():\n params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value\n return params\n\n def _build_step_list(self, steps):\n if type(steps) != types.ListType:\n steps = [steps]\n\n params = {}\n for i, step in enumerate(steps):\n for key, value in step.iteritems():\n params['Steps.member.%s.%s' % (i+1, key)] = value\n return params\n\n def _build_instance_args(self, ec2_keyname, availability_zone, master_instance_type,\n slave_instance_type, num_instances, keep_alive, hadoop_version):\n params = {\n 'Instances.MasterInstanceType' : master_instance_type,\n 'Instances.SlaveInstanceType' : slave_instance_type,\n 'Instances.InstanceCount' : num_instances,\n 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),\n 'Instances.HadoopVersion' : hadoop_version\n }\n\n if ec2_keyname:\n params['Instances.Ec2KeyName'] = ec2_keyname\n if availability_zone:\n params['Placement'] = availability_zone\n\n return params\n\n", "path": "boto/emr/connection.py"}], "after_files": [{"content": "# Copyright (c) 2010 Spotify AB\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to 
permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a connection to the EMR service\n\"\"\"\nimport types\n\nimport boto\nfrom boto.ec2.regioninfo import RegionInfo\nfrom boto.emr.emrobject import JobFlow, RunJobFlowResponse\nfrom boto.emr.step import JarStep\nfrom boto.connection import AWSQueryConnection\nfrom boto.exception import EmrResponseError\n\nclass EmrConnection(AWSQueryConnection):\n\n APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')\n DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')\n DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',\n 'elasticmapreduce.amazonaws.com')\n ResponseError = EmrResponseError\n\n # Constants for AWS Console debugging\n DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'\n DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'\n\n def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n is_secure=True, port=None, proxy=None, proxy_port=None,\n proxy_user=None, proxy_pass=None, debug=0,\n https_connection_factory=None, region=None, path='/'):\n if not region:\n region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)\n self.region = region\n AWSQueryConnection.__init__(self, aws_access_key_id,\n aws_secret_access_key,\n is_secure, port, proxy, proxy_port,\n proxy_user, proxy_pass,\n self.region.endpoint, debug,\n https_connection_factory, path)\n\n def _required_auth_capability(self):\n return ['emr']\n\n def describe_jobflow(self, jobflow_id):\n \"\"\"\n Describes a single Elastic MapReduce job flow\n\n :type jobflow_id: str\n :param jobflow_id: The job flow id of interest\n \"\"\"\n jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])\n if jobflows:\n return jobflows[0]\n\n def describe_jobflows(self, states=None, jobflow_ids=None,\n created_after=None, created_before=None):\n \"\"\"\n Retrieve all the Elastic MapReduce job flows on your account\n\n :type states: list\n :param states: A list of strings with job flow states wanted\n\n :type jobflow_ids: list\n :param jobflow_ids: A list of job flow IDs\n :type created_after: datetime\n :param created_after: Bound on job flow creation time\n\n :type created_before: datetime\n :param created_before: Bound on job flow creation time\n \"\"\"\n params = {}\n\n if states:\n self.build_list_params(params, states, 'JobFlowStates.member')\n if jobflow_ids:\n self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')\n if created_after:\n params['CreatedAfter'] = created_after.strftime('%Y-%m-%dT%H:%M:%S')\n if created_before:\n params['CreatedBefore'] = created_before.strftime('%Y-%m-%dT%H:%M:%S')\n\n return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])\n\n def terminate_jobflow(self, jobflow_id):\n \"\"\"\n Terminate an Elastic MapReduce job flow\n\n 
:type jobflow_id: str\n :param jobflow_id: A jobflow id \n \"\"\"\n self.terminate_jobflows([jobflow_id]) \n\n def terminate_jobflows(self, jobflow_ids):\n \"\"\"\n Terminate an Elastic MapReduce job flow\n\n :type jobflow_ids: list\n :param jobflow_ids: A list of job flow IDs\n \"\"\"\n params = {}\n self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')\n return self.get_status('TerminateJobFlows', params)\n\n def add_jobflow_steps(self, jobflow_id, steps):\n \"\"\"\n Adds steps to a jobflow\n\n :type jobflow_id: str\n :param jobflow_id: The job flow id\n :type steps: list(boto.emr.Step)\n :param steps: A list of steps to add to the job\n \"\"\"\n if type(steps) != types.ListType:\n steps = [steps]\n params = {}\n params['JobFlowId'] = jobflow_id\n\n # Step args\n step_args = [self._build_step_args(step) for step in steps]\n params.update(self._build_step_list(step_args))\n\n return self.get_object('AddJobFlowSteps', params, RunJobFlowResponse)\n\n def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None,\n master_instance_type='m1.small',\n slave_instance_type='m1.small', num_instances=1,\n action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,\n enable_debugging=False,\n hadoop_version='0.18',\n steps=[],\n bootstrap_actions=[]):\n \"\"\"\n Runs a job flow\n\n :type name: str\n :param name: Name of the job flow\n :type log_uri: str\n :param log_uri: URI of the S3 bucket to place logs\n :type ec2_keyname: str\n :param ec2_keyname: EC2 key used for the instances\n :type availability_zone: str\n :param availability_zone: EC2 availability zone of the cluster\n :type master_instance_type: str\n :param master_instance_type: EC2 instance type of the master\n :type slave_instance_type: str\n :param slave_instance_type: EC2 instance type of the slave nodes\n :type num_instances: int\n :param num_instances: Number of instances in the Hadoop cluster\n :type action_on_failure: str\n :param action_on_failure: Action to take if a step terminates\n :type keep_alive: bool\n :param keep_alive: Denotes whether the cluster should stay alive upon completion\n :type enable_debugging: bool\n :param enable_debugging: Denotes whether AWS console debugging should be enabled.\n :type steps: list(boto.emr.Step)\n :param steps: List of steps to add with the job\n\n :rtype: str\n :return: The jobflow id\n \"\"\"\n params = {}\n if action_on_failure:\n params['ActionOnFailure'] = action_on_failure\n params['Name'] = name\n params['LogUri'] = log_uri\n\n # Instance args\n instance_params = self._build_instance_args(ec2_keyname, availability_zone,\n master_instance_type, slave_instance_type,\n num_instances, keep_alive, hadoop_version)\n params.update(instance_params)\n\n # Debugging step from EMR API docs\n if enable_debugging:\n debugging_step = JarStep(name='Setup Hadoop Debugging',\n action_on_failure='TERMINATE_JOB_FLOW',\n main_class=None,\n jar=self.DebuggingJar,\n step_args=self.DebuggingArgs)\n steps.insert(0, debugging_step)\n\n # Step args\n if steps:\n step_args = [self._build_step_args(step) for step in steps]\n params.update(self._build_step_list(step_args))\n\n if bootstrap_actions:\n bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]\n params.update(self._build_bootstrap_action_list(bootstrap_action_args))\n\n response = self.get_object('RunJobFlow', params, RunJobFlowResponse)\n return response.jobflowid\n\n def _build_bootstrap_action_args(self, bootstrap_action):\n bootstrap_action_params = {}\n 
bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path\n\n try:\n bootstrap_action_params['Name'] = bootstrap_action.name\n except AttributeError:\n pass\n\n args = bootstrap_action.args()\n if args:\n self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')\n\n return bootstrap_action_params\n\n def _build_step_args(self, step):\n step_params = {}\n step_params['ActionOnFailure'] = step.action_on_failure\n step_params['HadoopJarStep.Jar'] = step.jar()\n\n main_class = step.main_class()\n if main_class:\n step_params['HadoopJarStep.MainClass'] = main_class\n\n args = step.args()\n if args:\n self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')\n\n step_params['Name'] = step.name\n return step_params\n\n def _build_bootstrap_action_list(self, bootstrap_actions):\n if type(bootstrap_actions) != types.ListType:\n bootstrap_actions = [bootstrap_actions]\n\n params = {}\n for i, bootstrap_action in enumerate(bootstrap_actions):\n for key, value in bootstrap_action.iteritems():\n params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value\n return params\n\n def _build_step_list(self, steps):\n if type(steps) != types.ListType:\n steps = [steps]\n\n params = {}\n for i, step in enumerate(steps):\n for key, value in step.iteritems():\n params['Steps.member.%s.%s' % (i+1, key)] = value\n return params\n\n def _build_instance_args(self, ec2_keyname, availability_zone, master_instance_type,\n slave_instance_type, num_instances, keep_alive, hadoop_version):\n params = {\n 'Instances.MasterInstanceType' : master_instance_type,\n 'Instances.SlaveInstanceType' : slave_instance_type,\n 'Instances.InstanceCount' : num_instances,\n 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),\n 'Instances.HadoopVersion' : hadoop_version\n }\n\n if ec2_keyname:\n params['Instances.Ec2KeyName'] = ec2_keyname\n if availability_zone:\n params['Placement.AvailabilityZone'] = availability_zone\n\n return params\n\n", "path": "boto/emr/connection.py"}]} | 3,492 | 104 |
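For reference, this is the instance-argument mapping that `_build_instance_args` produces once the patch above is applied; the only change from the buggy version is the key used for the availability zone. The key-pair name and zone below are placeholders, not values taken from the issue.

```python
# Illustrative only: expected output of _build_instance_args(...) after the fix,
# for a single-node, keep-alive-off cluster pinned to a zone. Values are placeholders.
expected_params = {
    "Instances.MasterInstanceType": "m1.small",
    "Instances.SlaveInstanceType": "m1.small",
    "Instances.InstanceCount": 1,
    "Instances.KeepJobFlowAliveWhenNoSteps": "false",
    "Instances.HadoopVersion": "0.18",
    "Instances.Ec2KeyName": "my-keypair",        # hypothetical EC2 key pair name
    "Placement.AvailabilityZone": "us-east-1a",  # was "Placement" before the fix
}
```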
gh_patches_debug_34858 | rasdani/github-patches | git_diff | ansible-collections__amazon.aws-430 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aws_service_ip_ranges support for IPv6
### Summary
We are using the amazon.aws collection and noticed that the aws_service_ip_ranges lookup does not have an option to return IPv6 ranges.
### Issue Type
Feature Idea
### Component Name
`{{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }}`
This should return a list of IPv6 ranges that correspond to the Route53 health checks.
### Pull Request
#430
### Additional Information
<!--- Paste example playbooks or commands between quotes below -->
```
vars:
rt53_ranges: "{{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }}"
tasks:
- name: "use list return option and iterate as a loop"
debug: msg="{% for x in rt53_ranges %}{{ x }} {% endfor %}"
# ###"2600:1f14:7ff:f800::/56,2600:1f14:fff:f800::/56"
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/lookup/aws_service_ip_ranges.py`
Content:
```
1 # (c) 2016 James Turner <[email protected]>
2 # (c) 2017 Ansible Project
3 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
4 from __future__ import (absolute_import, division, print_function)
5 __metaclass__ = type
6
7 DOCUMENTATION = '''
8 lookup: aws_service_ip_ranges
9 author:
10 - James Turner <[email protected]>
11 requirements:
12 - must have public internet connectivity
13 short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
14 description:
15 - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
16 - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.
17 options:
18 service:
19 description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
20 region:
21 description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
22 '''
23
24 EXAMPLES = """
25 vars:
26 ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
27 tasks:
28
29 - name: "use list return option and iterate as a loop"
30 debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
31 # "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
32
33 - name: "Pull S3 IP ranges, and print the default return style"
34 debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
35 # "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
36 """
37
38 RETURN = """
39 _raw:
40 description: comma-separated list of CIDR ranges
41 """
42
43
44 import json
45
46 from ansible.errors import AnsibleError
47 from ansible.module_utils.six.moves.urllib.error import HTTPError
48 from ansible.module_utils.six.moves.urllib.error import URLError
49 from ansible.module_utils._text import to_native
50 from ansible.module_utils.urls import ConnectionError
51 from ansible.module_utils.urls import open_url
52 from ansible.module_utils.urls import SSLValidationError
53 from ansible.plugins.lookup import LookupBase
54
55
56 class LookupModule(LookupBase):
57 def run(self, terms, variables, **kwargs):
58 try:
59 resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
60 amazon_response = json.load(resp)['prefixes']
61 except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
62 # on Python 3+, json.decoder.JSONDecodeError is raised for bad
63 # JSON. On 2.x it's a ValueError
64 raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
65 except HTTPError as e:
66 raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
67 except SSLValidationError as e:
68 raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
69 except URLError as e:
70 raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
71 except ConnectionError as e:
72 raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
73
74 if 'region' in kwargs:
75 region = kwargs['region']
76 amazon_response = (item for item in amazon_response if item['region'] == region)
77 if 'service' in kwargs:
78 service = str.upper(kwargs['service'])
79 amazon_response = (item for item in amazon_response if item['service'] == service)
80
81 return [item['ip_prefix'] for item in amazon_response]
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/lookup/aws_service_ip_ranges.py b/plugins/lookup/aws_service_ip_ranges.py
--- a/plugins/lookup/aws_service_ip_ranges.py
+++ b/plugins/lookup/aws_service_ip_ranges.py
@@ -19,6 +19,9 @@
description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
region:
description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
+ ipv6_prefixes:
+ description: 'When I(ipv6_prefixes=True) the lookup will return ipv6 addresses instead of ipv4 addresses'
+ version_added: 2.1.0
'''
EXAMPLES = """
@@ -40,7 +43,6 @@
description: comma-separated list of CIDR ranges
"""
-
import json
from ansible.errors import AnsibleError
@@ -55,9 +57,16 @@
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
+ if "ipv6_prefixes" in kwargs and kwargs["ipv6_prefixes"]:
+ prefixes_label = "ipv6_prefixes"
+ ip_prefix_label = "ipv6_prefix"
+ else:
+ prefixes_label = "prefixes"
+ ip_prefix_label = "ip_prefix"
+
try:
resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
- amazon_response = json.load(resp)['prefixes']
+ amazon_response = json.load(resp)[prefixes_label]
except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
# on Python 3+, json.decoder.JSONDecodeError is raised for bad
# JSON. On 2.x it's a ValueError
@@ -77,5 +86,5 @@
if 'service' in kwargs:
service = str.upper(kwargs['service'])
amazon_response = (item for item in amazon_response if item['service'] == service)
-
- return [item['ip_prefix'] for item in amazon_response]
+ iprange = [item[ip_prefix_label] for item in amazon_response]
+ return iprange
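As context for the patch above, a standalone sketch of what the lookup effectively does when called with `ipv6_prefixes=True`: it reads the same published ip-ranges.json document and filters the `ipv6_prefixes` entries by region and service. This assumes public internet connectivity; the region and service values are taken from the issue example.

```python
# Standalone sketch mirroring the patched lookup's IPv6 path (assumes the
# published ip-ranges.json schema with "ipv6_prefixes"/"ipv6_prefix" keys).
import json
from urllib.request import urlopen

with urlopen("https://ip-ranges.amazonaws.com/ip-ranges.json") as resp:
    data = json.load(resp)

ranges = [
    item["ipv6_prefix"]
    for item in data["ipv6_prefixes"]
    if item["region"] == "us-west-2" and item["service"] == "ROUTE53_HEALTHCHECKS"
]
print(ranges)
```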
| {"golden_diff": "diff --git a/plugins/lookup/aws_service_ip_ranges.py b/plugins/lookup/aws_service_ip_ranges.py\n--- a/plugins/lookup/aws_service_ip_ranges.py\n+++ b/plugins/lookup/aws_service_ip_ranges.py\n@@ -19,6 +19,9 @@\n description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'\n region:\n description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'\n+ ipv6_prefixes:\n+ description: 'When I(ipv6_prefixes=True) the lookup will return ipv6 addresses instead of ipv4 addresses'\n+ version_added: 2.1.0\n '''\n \n EXAMPLES = \"\"\"\n@@ -40,7 +43,6 @@\n description: comma-separated list of CIDR ranges\n \"\"\"\n \n-\n import json\n \n from ansible.errors import AnsibleError\n@@ -55,9 +57,16 @@\n \n class LookupModule(LookupBase):\n def run(self, terms, variables, **kwargs):\n+ if \"ipv6_prefixes\" in kwargs and kwargs[\"ipv6_prefixes\"]:\n+ prefixes_label = \"ipv6_prefixes\"\n+ ip_prefix_label = \"ipv6_prefix\"\n+ else:\n+ prefixes_label = \"prefixes\"\n+ ip_prefix_label = \"ip_prefix\"\n+\n try:\n resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')\n- amazon_response = json.load(resp)['prefixes']\n+ amazon_response = json.load(resp)[prefixes_label]\n except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:\n # on Python 3+, json.decoder.JSONDecodeError is raised for bad\n # JSON. On 2.x it's a ValueError\n@@ -77,5 +86,5 @@\n if 'service' in kwargs:\n service = str.upper(kwargs['service'])\n amazon_response = (item for item in amazon_response if item['service'] == service)\n-\n- return [item['ip_prefix'] for item in amazon_response]\n+ iprange = [item[ip_prefix_label] for item in amazon_response]\n+ return iprange\n", "issue": "aws_service_ip_ranges suppport for ipv6\n### Summary\r\n\r\nWe are using amazon.aws collection and we noticed that the aws_service_ip_ranges does not have an option to return IPv6 ranges.\r\n\r\n### Issue Type\r\n\r\nFeature Idea\r\n\r\n### Component Name\r\n\r\n`{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }`\r\nShould return a list of IPv6 addresses that correspond to the Route53 health check. 
\r\n\r\n\r\n### Pull Request\r\n#430\r\n\r\n### Additional Information\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```\r\nvars:\r\n rt53_ranges: \"{{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }}\"\r\ntasks:\r\n\r\n- name: \"use list return option and iterate as a loop\"\r\n debug: msg=\"{% for x in rt53_ranges %}{{ x }} {% endfor %}\"\r\n# ###\"2600:1f14:7ff:f800::/56,2600:1f14:fff:f800::/56\"\r\n```\r\n\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# (c) 2016 James Turner <[email protected]>\n# (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nlookup: aws_service_ip_ranges\nauthor:\n - James Turner <[email protected]>\nrequirements:\n - must have public internet connectivity\nshort_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.\ndescription:\n - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.\n - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.\noptions:\n service:\n description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'\n region:\n description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'\n'''\n\nEXAMPLES = \"\"\"\nvars:\n ec2_ranges: \"{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}\"\ntasks:\n\n- name: \"use list return option and iterate as a loop\"\n debug: msg=\"{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}\"\n# \"52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 \"\n\n- name: \"Pull S3 IP ranges, and print the default return style\"\n debug: msg=\"{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}\"\n# \"52.92.16.0/20,52.216.0.0/15,54.231.0.0/17\"\n\"\"\"\n\nRETURN = \"\"\"\n_raw:\n description: comma-separated list of CIDR ranges\n\"\"\"\n\n\nimport json\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.six.moves.urllib.error import HTTPError\nfrom ansible.module_utils.six.moves.urllib.error import URLError\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.urls import ConnectionError\nfrom ansible.module_utils.urls import open_url\nfrom ansible.module_utils.urls import SSLValidationError\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass LookupModule(LookupBase):\n def run(self, terms, variables, **kwargs):\n try:\n resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')\n amazon_response = json.load(resp)['prefixes']\n except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:\n # on Python 3+, json.decoder.JSONDecodeError is raised for bad\n # JSON. 
On 2.x it's a ValueError\n raise AnsibleError(\"Could not decode AWS IP ranges: %s\" % to_native(e))\n except HTTPError as e:\n raise AnsibleError(\"Received HTTP error while pulling IP ranges: %s\" % to_native(e))\n except SSLValidationError as e:\n raise AnsibleError(\"Error validating the server's certificate for: %s\" % to_native(e))\n except URLError as e:\n raise AnsibleError(\"Failed look up IP range service: %s\" % to_native(e))\n except ConnectionError as e:\n raise AnsibleError(\"Error connecting to IP range service: %s\" % to_native(e))\n\n if 'region' in kwargs:\n region = kwargs['region']\n amazon_response = (item for item in amazon_response if item['region'] == region)\n if 'service' in kwargs:\n service = str.upper(kwargs['service'])\n amazon_response = (item for item in amazon_response if item['service'] == service)\n\n return [item['ip_prefix'] for item in amazon_response]\n", "path": "plugins/lookup/aws_service_ip_ranges.py"}], "after_files": [{"content": "# (c) 2016 James Turner <[email protected]>\n# (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nlookup: aws_service_ip_ranges\nauthor:\n - James Turner <[email protected]>\nrequirements:\n - must have public internet connectivity\nshort_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.\ndescription:\n - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.\n - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.\noptions:\n service:\n description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'\n region:\n description: 'The AWS region to narrow the ranges to. 
Examples: us-east-1, eu-west-2, ap-southeast-1'\n ipv6_prefixes:\n description: 'When I(ipv6_prefixes=True) the lookup will return ipv6 addresses instead of ipv4 addresses'\n version_added: 2.1.0\n'''\n\nEXAMPLES = \"\"\"\nvars:\n ec2_ranges: \"{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}\"\ntasks:\n\n- name: \"use list return option and iterate as a loop\"\n debug: msg=\"{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}\"\n# \"52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 \"\n\n- name: \"Pull S3 IP ranges, and print the default return style\"\n debug: msg=\"{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}\"\n# \"52.92.16.0/20,52.216.0.0/15,54.231.0.0/17\"\n\"\"\"\n\nRETURN = \"\"\"\n_raw:\n description: comma-separated list of CIDR ranges\n\"\"\"\n\nimport json\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.six.moves.urllib.error import HTTPError\nfrom ansible.module_utils.six.moves.urllib.error import URLError\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.urls import ConnectionError\nfrom ansible.module_utils.urls import open_url\nfrom ansible.module_utils.urls import SSLValidationError\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass LookupModule(LookupBase):\n def run(self, terms, variables, **kwargs):\n if \"ipv6_prefixes\" in kwargs and kwargs[\"ipv6_prefixes\"]:\n prefixes_label = \"ipv6_prefixes\"\n ip_prefix_label = \"ipv6_prefix\"\n else:\n prefixes_label = \"prefixes\"\n ip_prefix_label = \"ip_prefix\"\n\n try:\n resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')\n amazon_response = json.load(resp)[prefixes_label]\n except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:\n # on Python 3+, json.decoder.JSONDecodeError is raised for bad\n # JSON. On 2.x it's a ValueError\n raise AnsibleError(\"Could not decode AWS IP ranges: %s\" % to_native(e))\n except HTTPError as e:\n raise AnsibleError(\"Received HTTP error while pulling IP ranges: %s\" % to_native(e))\n except SSLValidationError as e:\n raise AnsibleError(\"Error validating the server's certificate for: %s\" % to_native(e))\n except URLError as e:\n raise AnsibleError(\"Failed look up IP range service: %s\" % to_native(e))\n except ConnectionError as e:\n raise AnsibleError(\"Error connecting to IP range service: %s\" % to_native(e))\n\n if 'region' in kwargs:\n region = kwargs['region']\n amazon_response = (item for item in amazon_response if item['region'] == region)\n if 'service' in kwargs:\n service = str.upper(kwargs['service'])\n amazon_response = (item for item in amazon_response if item['service'] == service)\n iprange = [item[ip_prefix_label] for item in amazon_response]\n return iprange\n", "path": "plugins/lookup/aws_service_ip_ranges.py"}]} | 1,679 | 499 |
gh_patches_debug_32086 | rasdani/github-patches | git_diff | pypa__cibuildwheel-408 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CIBW_ENVIRONMENT broken in 1.5.3
Somehow, my line of `export CIBW_ENVIRONMENT='... CCACHE_BASEDIR=`python -c "import tempfile; import os; print(os.path.realpath(tempfile.gettempdir()))"` ...'` (see https://github.com/YannickJadoul/Parselmouth/blob/1f3dd9abc63afdf4a1d26db40d210bb4187118a8/.travis.yml#L143; yes, I know I ought to clean this up) breaks:
```
+ /bin/true
+ mkdir -p /project
+ /opt/python/cp38-cp38/bin/python -c 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
+ uname -i
x86_64
+ python -c import 'tempfile;' import 'os;' 'print(os.path.realpath(tempfile.gettempdir()))'
File "<string>", line 1
import
```
See https://travis-ci.org/github/YannickJadoul/Parselmouth/jobs/709679114#L392
I'll start investigating, but is this perhaps related to #403, @joerick?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cibuildwheel/bashlex_eval.py`
Content:
```
1 import shlex
2 import subprocess
3 from typing import Callable, Dict, List, NamedTuple, Optional, Sequence
4
5 import bashlex # type: ignore
6
7 # a function that takes a shell command and the environment, and returns the result
8 EnvironmentExecutor = Callable[[str, Dict[str, str]], str]
9
10
11 def local_environment_executor(command: str, env: Dict[str, str]) -> str:
12 return subprocess.check_output(shlex.split(command), env=env, universal_newlines=True)
13
14
15 class NodeExecutionContext(NamedTuple):
16 environment: Dict[str, str]
17 input: str
18 executor: EnvironmentExecutor
19
20
21 def evaluate(value: str, environment: Dict[str, str], executor: Optional[EnvironmentExecutor] = None) -> str:
22 if not value:
23 # empty string evaluates to empty string
24 # (but trips up bashlex)
25 return ''
26
27 command_node = bashlex.parsesingle(value)
28
29 if len(command_node.parts) != 1:
30 raise ValueError(f'"{value}" has too many parts')
31
32 value_word_node = command_node.parts[0]
33
34 return evaluate_node(
35 value_word_node,
36 context=NodeExecutionContext(environment=environment, input=value, executor=executor or local_environment_executor)
37 )
38
39
40 def evaluate_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
41 if node.kind == 'word':
42 return evaluate_word_node(node, context=context)
43 elif node.kind == 'commandsubstitution':
44 node_result = evaluate_command_node(node.command, context=context)
45 # bash removes training newlines in command substitution
46 return node_result.rstrip()
47 elif node.kind == 'parameter':
48 return evaluate_parameter_node(node, context=context)
49 else:
50 raise ValueError(f'Unsupported bash construct: "{node.kind}"')
51
52
53 def evaluate_word_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
54 value = node.word
55
56 for part in node.parts:
57 part_string = context.input[part.pos[0]:part.pos[1]]
58 part_value = evaluate_node(part, context=context)
59
60 if part_string not in value:
61 raise RuntimeError(
62 'bash parse failed. part "{}" not found in "{}". Word was "{}". Full input was "{}"'.format(
63 part_string, value, node.word, context.input,
64 )
65 )
66
67 value = value.replace(part_string, part_value, 1)
68
69 return value
70
71
72 def evaluate_command_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
73 if any(n.kind == 'operator' for n in node.parts):
74 return evaluate_nodes_as_compound_command(node.parts, context=context)
75 else:
76 return evaluate_nodes_as_simple_command(node.parts, context=context)
77
78
79 def evaluate_nodes_as_compound_command(nodes: Sequence[bashlex.ast.node], context: NodeExecutionContext) -> str:
80 # bashlex doesn't support any operators besides ';' inside command
81 # substitutions, so we only need to handle that case. We do so assuming
82 # that `set -o errexit` is on, because it's easier to code!
83
84 result = ''
85 for node in nodes:
86 if node.kind == 'command':
87 result += evaluate_command_node(node, context=context)
88 elif node.kind == 'operator':
89 if node.op == ';':
90 pass
91 else:
92 raise ValueError(f'Unsupported bash operator: "{node.op}"')
93 else:
94 raise ValueError(f'Unsupported bash node in compound command: "{node.kind}"')
95
96 return result
97
98
99 def evaluate_nodes_as_simple_command(nodes: List[bashlex.ast.node], context: NodeExecutionContext):
100 words = [evaluate_node(part, context=context) for part in nodes]
101 command = ' '.join(words)
102 return context.executor(command, context.environment)
103
104
105 def evaluate_parameter_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
106 return context.environment.get(node.value, '')
107
```
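The listing above is worth pausing on before reading the next file: `evaluate_nodes_as_simple_command` joins the evaluated words back into one string, and `local_environment_executor` then re-splits that string with `shlex.split`. The short sketch below (not part of the project) shows why that round trip cannot preserve an argument that itself contains spaces — which is exactly the mangled `python -c import 'tempfile;' ...` invocation in the issue log.

```python
# Sketch only: the join/re-split round trip loses argument boundaries.
import shlex

words = ['python', '-c',
         'import tempfile; import os; print(os.path.realpath(tempfile.gettempdir()))']

joined = ' '.join(words)        # the single -c argument is flattened here
resplit = shlex.split(joined)   # and cannot be reconstructed afterwards

print(resplit[:5])
# ['python', '-c', 'import', 'tempfile;', 'import']
```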
Path: `cibuildwheel/docker_container.py`
Content:
```
1 import io
2 import json
3 import os
4 import shlex
5 import subprocess
6 import sys
7 import uuid
8 from os import PathLike
9 from pathlib import Path, PurePath
10 from typing import IO, Dict, List, Optional, Sequence, Union
11
12
13 class DockerContainer:
14 '''
15 An object that represents a running Docker container.
16
17 Intended for use as a context manager e.g.
18 `with DockerContainer('ubuntu') as docker:`
19
20 A bash shell is running in the remote container. When `call()` is invoked,
21 the command is relayed to the remote shell, and the results are streamed
22 back to cibuildwheel.
23 '''
24 UTILITY_PYTHON = '/opt/python/cp38-cp38/bin/python'
25
26 process: subprocess.Popen
27 bash_stdin: IO[bytes]
28 bash_stdout: IO[bytes]
29
30 def __init__(self, docker_image: str, simulate_32_bit=False):
31 self.docker_image = docker_image
32 self.simulate_32_bit = simulate_32_bit
33
34 def __enter__(self) -> 'DockerContainer':
35 self.name = f'cibuildwheel-{uuid.uuid4()}'
36 shell_args = ['linux32', '/bin/bash'] if self.simulate_32_bit else ['/bin/bash']
37 subprocess.run(
38 [
39 'docker', 'create',
40 '--env', 'CIBUILDWHEEL',
41 '--name', self.name,
42 '-i',
43 '-v', '/:/host', # ignored on CircleCI
44 self.docker_image,
45 *shell_args
46 ],
47 check=True,
48 )
49 self.process = subprocess.Popen(
50 [
51 'docker', 'start',
52 '--attach', '--interactive',
53 self.name,
54 ],
55 stdin=subprocess.PIPE,
56 stdout=subprocess.PIPE,
57 )
58
59 assert self.process.stdin and self.process.stdout
60 self.bash_stdin = self.process.stdin
61 self.bash_stdout = self.process.stdout
62
63 # run a noop command to block until the container is responding
64 self.call(['/bin/true'])
65
66 return self
67
68 def __exit__(self, exc_type, exc_val, exc_tb):
69 self.bash_stdin.close()
70 self.process.terminate()
71 self.process.wait()
72
73 subprocess.run(['docker', 'rm', '--force', '-v', self.name])
74 self.name = None
75
76 def copy_into(self, from_path: Path, to_path: PurePath) -> None:
77 # `docker cp` causes 'no space left on device' error when
78 # a container is running and the host filesystem is
79 # mounted. https://github.com/moby/moby/issues/38995
80 # Use `docker exec` instead.
81
82 if from_path.is_dir():
83 self.call(['mkdir', '-p', to_path])
84 subprocess.run(
85 f'tar cf - . | docker exec -i {self.name} tar -xC {shell_quote(to_path)} -f -',
86 shell=True,
87 check=True,
88 cwd=from_path)
89 else:
90 subprocess.run(
91 f'cat {shell_quote(from_path)} | docker exec -i {self.name} sh -c "cat > {shell_quote(to_path)}"',
92 shell=True,
93 check=True)
94
95 def copy_out(self, from_path: PurePath, to_path: Path) -> None:
96 # note: we assume from_path is a dir
97 to_path.mkdir(parents=True, exist_ok=True)
98
99 subprocess.run(
100 f'docker exec -i {self.name} tar -cC {shell_quote(from_path)} -f - . | tar -xf -',
101 shell=True,
102 check=True,
103 cwd=to_path
104 )
105
106 def glob(self, path: PurePath, pattern: str) -> List[PurePath]:
107 glob_pattern = os.path.join(str(path), pattern)
108
109 path_strs = json.loads(self.call([
110 self.UTILITY_PYTHON,
111 '-c',
112 f'import sys, json, glob; json.dump(glob.glob({glob_pattern!r}), sys.stdout)'
113 ], capture_output=True))
114
115 return [PurePath(p) for p in path_strs]
116
117 def call(self, args: Sequence[Union[str, PathLike]], env: Dict[str, str] = {},
118 capture_output=False, cwd: Optional[Union[str, PathLike]] = None) -> str:
119 chdir = f'cd {cwd}' if cwd else ''
120 env_assignments = ' '.join(f'{shlex.quote(k)}={shlex.quote(v)}'
121 for k, v in env.items())
122 command = ' '.join(shlex.quote(str(a)) for a in args)
123 end_of_message = str(uuid.uuid4())
124
125 # log the command we're executing
126 print(f' + {command}')
127
128 # Write a command to the remote shell. First we change the
129 # cwd, if that's required. Then, we use the `env` utility to run
130 # `command` inside the specified environment. We use `env` because it
131 # can cope with spaces and strange characters in the name or value.
132 # Finally, the remote shell is told to write a footer - this will show
133 # up in the output so we know when to stop reading, and will include
134 # the returncode of `command`.
135 self.bash_stdin.write(bytes(f'''(
136 {chdir}
137 env {env_assignments} {command}
138 printf "%04d%s\n" $? {end_of_message}
139 )
140 ''', encoding='utf8', errors='surrogateescape'))
141 self.bash_stdin.flush()
142
143 if capture_output:
144 output_io: IO[bytes] = io.BytesIO()
145 else:
146 output_io = sys.stdout.buffer
147
148 while True:
149 line = self.bash_stdout.readline()
150
151 if line.endswith(b'%s\n' % (bytes(end_of_message, encoding='utf8'))):
152 footer_offset = (
153 len(line)
154 - 1 # newline character
155 - len(end_of_message) # delimiter
156 - 4 # 4 returncode decimals
157 )
158 returncode_str = line[footer_offset:footer_offset+4]
159 returncode = int(returncode_str)
160 # add the last line to output, without the footer
161 output_io.write(line[0:footer_offset])
162 break
163 else:
164 output_io.write(line)
165
166 if isinstance(output_io, io.BytesIO):
167 output = str(output_io.getvalue(), encoding='utf8', errors='surrogateescape')
168 else:
169 output = ''
170
171 if returncode != 0:
172 raise subprocess.CalledProcessError(returncode, args, output)
173
174 return output
175
176 def get_environment(self) -> Dict[str, str]:
177 return json.loads(self.call([
178 self.UTILITY_PYTHON,
179 '-c',
180 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
181 ], capture_output=True))
182
183 def environment_executor(self, command: str, environment: Dict[str, str]) -> str:
184 # used as an EnvironmentExecutor to evaluate commands and capture output
185 return self.call(shlex.split(command), env=environment)
186
187
188 def shell_quote(path: PurePath) -> str:
189 return shlex.quote(str(path))
190
```
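One detail of `DockerContainer.call()` above matters for the bug being chased: it shell-quotes each element of `args` individually before writing the line to the remote bash, so argument boundaries survive only when the caller hands it a genuine argument list. A small sketch of that quoting step (illustrative, not repository code):

```python
# Sketch of the per-argument quoting performed inside DockerContainer.call().
import shlex

args = ['python', '-c', 'import tempfile; print(tempfile.gettempdir())']
command = ' '.join(shlex.quote(str(a)) for a in args)
print(command)
# python -c 'import tempfile; print(tempfile.gettempdir())'
```

When `environment_executor` first re-splits a pre-joined string with `shlex.split`, the multi-word `-c` payload has already been broken apart before this quoting can protect it.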
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cibuildwheel/bashlex_eval.py b/cibuildwheel/bashlex_eval.py
--- a/cibuildwheel/bashlex_eval.py
+++ b/cibuildwheel/bashlex_eval.py
@@ -1,15 +1,14 @@
-import shlex
import subprocess
from typing import Callable, Dict, List, NamedTuple, Optional, Sequence
import bashlex # type: ignore
-# a function that takes a shell command and the environment, and returns the result
-EnvironmentExecutor = Callable[[str, Dict[str, str]], str]
+# a function that takes a command and the environment, and returns the result
+EnvironmentExecutor = Callable[[List[str], Dict[str, str]], str]
-def local_environment_executor(command: str, env: Dict[str, str]) -> str:
- return subprocess.check_output(shlex.split(command), env=env, universal_newlines=True)
+def local_environment_executor(command: List[str], env: Dict[str, str]) -> str:
+ return subprocess.check_output(command, env=env, universal_newlines=True)
class NodeExecutionContext(NamedTuple):
@@ -97,8 +96,7 @@
def evaluate_nodes_as_simple_command(nodes: List[bashlex.ast.node], context: NodeExecutionContext):
- words = [evaluate_node(part, context=context) for part in nodes]
- command = ' '.join(words)
+ command = [evaluate_node(part, context=context) for part in nodes]
return context.executor(command, context.environment)
diff --git a/cibuildwheel/docker_container.py b/cibuildwheel/docker_container.py
--- a/cibuildwheel/docker_container.py
+++ b/cibuildwheel/docker_container.py
@@ -180,9 +180,9 @@
'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
], capture_output=True))
- def environment_executor(self, command: str, environment: Dict[str, str]) -> str:
+ def environment_executor(self, command: List[str], environment: Dict[str, str]) -> str:
# used as an EnvironmentExecutor to evaluate commands and capture output
- return self.call(shlex.split(command), env=environment)
+ return self.call(command, env=environment)
def shell_quote(path: PurePath) -> str:
| {"golden_diff": "diff --git a/cibuildwheel/bashlex_eval.py b/cibuildwheel/bashlex_eval.py\n--- a/cibuildwheel/bashlex_eval.py\n+++ b/cibuildwheel/bashlex_eval.py\n@@ -1,15 +1,14 @@\n-import shlex\n import subprocess\n from typing import Callable, Dict, List, NamedTuple, Optional, Sequence\n \n import bashlex # type: ignore\n \n-# a function that takes a shell command and the environment, and returns the result\n-EnvironmentExecutor = Callable[[str, Dict[str, str]], str]\n+# a function that takes a command and the environment, and returns the result\n+EnvironmentExecutor = Callable[[List[str], Dict[str, str]], str]\n \n \n-def local_environment_executor(command: str, env: Dict[str, str]) -> str:\n- return subprocess.check_output(shlex.split(command), env=env, universal_newlines=True)\n+def local_environment_executor(command: List[str], env: Dict[str, str]) -> str:\n+ return subprocess.check_output(command, env=env, universal_newlines=True)\n \n \n class NodeExecutionContext(NamedTuple):\n@@ -97,8 +96,7 @@\n \n \n def evaluate_nodes_as_simple_command(nodes: List[bashlex.ast.node], context: NodeExecutionContext):\n- words = [evaluate_node(part, context=context) for part in nodes]\n- command = ' '.join(words)\n+ command = [evaluate_node(part, context=context) for part in nodes]\n return context.executor(command, context.environment)\n \n \ndiff --git a/cibuildwheel/docker_container.py b/cibuildwheel/docker_container.py\n--- a/cibuildwheel/docker_container.py\n+++ b/cibuildwheel/docker_container.py\n@@ -180,9 +180,9 @@\n 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'\n ], capture_output=True))\n \n- def environment_executor(self, command: str, environment: Dict[str, str]) -> str:\n+ def environment_executor(self, command: List[str], environment: Dict[str, str]) -> str:\n # used as an EnvironmentExecutor to evaluate commands and capture output\n- return self.call(shlex.split(command), env=environment)\n+ return self.call(command, env=environment)\n \n \n def shell_quote(path: PurePath) -> str:\n", "issue": "CIBW_ENVIRONMENT broken in 1.5.3\nSomehow, my line of `export CIBW_ENVIRONMENT='... 
CCACHE_BASEDIR=`python -c \"import tempfile; import os; print(os.path.realpath(tempfile.gettempdir()))\"` ...'` (see https://github.com/YannickJadoul/Parselmouth/blob/1f3dd9abc63afdf4a1d26db40d210bb4187118a8/.travis.yml#L143; yes, I know I ought to clean this up) breaks:\r\n\r\n```\r\n + /bin/true\r\n + mkdir -p /project\r\n + /opt/python/cp38-cp38/bin/python -c 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'\r\n + uname -i\r\nx86_64\r\n + python -c import 'tempfile;' import 'os;' 'print(os.path.realpath(tempfile.gettempdir()))'\r\n File \"<string>\", line 1\r\n import\r\n```\r\n\r\nSee https://travis-ci.org/github/YannickJadoul/Parselmouth/jobs/709679114#L392\r\n\r\nI'll start investigating, but is this perhaps related to #403, @joerick?\n", "before_files": [{"content": "import shlex\nimport subprocess\nfrom typing import Callable, Dict, List, NamedTuple, Optional, Sequence\n\nimport bashlex # type: ignore\n\n# a function that takes a shell command and the environment, and returns the result\nEnvironmentExecutor = Callable[[str, Dict[str, str]], str]\n\n\ndef local_environment_executor(command: str, env: Dict[str, str]) -> str:\n return subprocess.check_output(shlex.split(command), env=env, universal_newlines=True)\n\n\nclass NodeExecutionContext(NamedTuple):\n environment: Dict[str, str]\n input: str\n executor: EnvironmentExecutor\n\n\ndef evaluate(value: str, environment: Dict[str, str], executor: Optional[EnvironmentExecutor] = None) -> str:\n if not value:\n # empty string evaluates to empty string\n # (but trips up bashlex)\n return ''\n\n command_node = bashlex.parsesingle(value)\n\n if len(command_node.parts) != 1:\n raise ValueError(f'\"{value}\" has too many parts')\n\n value_word_node = command_node.parts[0]\n\n return evaluate_node(\n value_word_node,\n context=NodeExecutionContext(environment=environment, input=value, executor=executor or local_environment_executor)\n )\n\n\ndef evaluate_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n if node.kind == 'word':\n return evaluate_word_node(node, context=context)\n elif node.kind == 'commandsubstitution':\n node_result = evaluate_command_node(node.command, context=context)\n # bash removes training newlines in command substitution\n return node_result.rstrip()\n elif node.kind == 'parameter':\n return evaluate_parameter_node(node, context=context)\n else:\n raise ValueError(f'Unsupported bash construct: \"{node.kind}\"')\n\n\ndef evaluate_word_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n value = node.word\n\n for part in node.parts:\n part_string = context.input[part.pos[0]:part.pos[1]]\n part_value = evaluate_node(part, context=context)\n\n if part_string not in value:\n raise RuntimeError(\n 'bash parse failed. part \"{}\" not found in \"{}\". Word was \"{}\". Full input was \"{}\"'.format(\n part_string, value, node.word, context.input,\n )\n )\n\n value = value.replace(part_string, part_value, 1)\n\n return value\n\n\ndef evaluate_command_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n if any(n.kind == 'operator' for n in node.parts):\n return evaluate_nodes_as_compound_command(node.parts, context=context)\n else:\n return evaluate_nodes_as_simple_command(node.parts, context=context)\n\n\ndef evaluate_nodes_as_compound_command(nodes: Sequence[bashlex.ast.node], context: NodeExecutionContext) -> str:\n # bashlex doesn't support any operators besides ';' inside command\n # substitutions, so we only need to handle that case. 
We do so assuming\n # that `set -o errexit` is on, because it's easier to code!\n\n result = ''\n for node in nodes:\n if node.kind == 'command':\n result += evaluate_command_node(node, context=context)\n elif node.kind == 'operator':\n if node.op == ';':\n pass\n else:\n raise ValueError(f'Unsupported bash operator: \"{node.op}\"')\n else:\n raise ValueError(f'Unsupported bash node in compound command: \"{node.kind}\"')\n\n return result\n\n\ndef evaluate_nodes_as_simple_command(nodes: List[bashlex.ast.node], context: NodeExecutionContext):\n words = [evaluate_node(part, context=context) for part in nodes]\n command = ' '.join(words)\n return context.executor(command, context.environment)\n\n\ndef evaluate_parameter_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n return context.environment.get(node.value, '')\n", "path": "cibuildwheel/bashlex_eval.py"}, {"content": "import io\nimport json\nimport os\nimport shlex\nimport subprocess\nimport sys\nimport uuid\nfrom os import PathLike\nfrom pathlib import Path, PurePath\nfrom typing import IO, Dict, List, Optional, Sequence, Union\n\n\nclass DockerContainer:\n '''\n An object that represents a running Docker container.\n\n Intended for use as a context manager e.g.\n `with DockerContainer('ubuntu') as docker:`\n\n A bash shell is running in the remote container. When `call()` is invoked,\n the command is relayed to the remote shell, and the results are streamed\n back to cibuildwheel.\n '''\n UTILITY_PYTHON = '/opt/python/cp38-cp38/bin/python'\n\n process: subprocess.Popen\n bash_stdin: IO[bytes]\n bash_stdout: IO[bytes]\n\n def __init__(self, docker_image: str, simulate_32_bit=False):\n self.docker_image = docker_image\n self.simulate_32_bit = simulate_32_bit\n\n def __enter__(self) -> 'DockerContainer':\n self.name = f'cibuildwheel-{uuid.uuid4()}'\n shell_args = ['linux32', '/bin/bash'] if self.simulate_32_bit else ['/bin/bash']\n subprocess.run(\n [\n 'docker', 'create',\n '--env', 'CIBUILDWHEEL',\n '--name', self.name,\n '-i',\n '-v', '/:/host', # ignored on CircleCI\n self.docker_image,\n *shell_args\n ],\n check=True,\n )\n self.process = subprocess.Popen(\n [\n 'docker', 'start',\n '--attach', '--interactive',\n self.name,\n ],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n\n assert self.process.stdin and self.process.stdout\n self.bash_stdin = self.process.stdin\n self.bash_stdout = self.process.stdout\n\n # run a noop command to block until the container is responding\n self.call(['/bin/true'])\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.bash_stdin.close()\n self.process.terminate()\n self.process.wait()\n\n subprocess.run(['docker', 'rm', '--force', '-v', self.name])\n self.name = None\n\n def copy_into(self, from_path: Path, to_path: PurePath) -> None:\n # `docker cp` causes 'no space left on device' error when\n # a container is running and the host filesystem is\n # mounted. https://github.com/moby/moby/issues/38995\n # Use `docker exec` instead.\n\n if from_path.is_dir():\n self.call(['mkdir', '-p', to_path])\n subprocess.run(\n f'tar cf - . 
| docker exec -i {self.name} tar -xC {shell_quote(to_path)} -f -',\n shell=True,\n check=True,\n cwd=from_path)\n else:\n subprocess.run(\n f'cat {shell_quote(from_path)} | docker exec -i {self.name} sh -c \"cat > {shell_quote(to_path)}\"',\n shell=True,\n check=True)\n\n def copy_out(self, from_path: PurePath, to_path: Path) -> None:\n # note: we assume from_path is a dir\n to_path.mkdir(parents=True, exist_ok=True)\n\n subprocess.run(\n f'docker exec -i {self.name} tar -cC {shell_quote(from_path)} -f - . | tar -xf -',\n shell=True,\n check=True,\n cwd=to_path\n )\n\n def glob(self, path: PurePath, pattern: str) -> List[PurePath]:\n glob_pattern = os.path.join(str(path), pattern)\n\n path_strs = json.loads(self.call([\n self.UTILITY_PYTHON,\n '-c',\n f'import sys, json, glob; json.dump(glob.glob({glob_pattern!r}), sys.stdout)'\n ], capture_output=True))\n\n return [PurePath(p) for p in path_strs]\n\n def call(self, args: Sequence[Union[str, PathLike]], env: Dict[str, str] = {},\n capture_output=False, cwd: Optional[Union[str, PathLike]] = None) -> str:\n chdir = f'cd {cwd}' if cwd else ''\n env_assignments = ' '.join(f'{shlex.quote(k)}={shlex.quote(v)}'\n for k, v in env.items())\n command = ' '.join(shlex.quote(str(a)) for a in args)\n end_of_message = str(uuid.uuid4())\n\n # log the command we're executing\n print(f' + {command}')\n\n # Write a command to the remote shell. First we change the\n # cwd, if that's required. Then, we use the `env` utility to run\n # `command` inside the specified environment. We use `env` because it\n # can cope with spaces and strange characters in the name or value.\n # Finally, the remote shell is told to write a footer - this will show\n # up in the output so we know when to stop reading, and will include\n # the returncode of `command`.\n self.bash_stdin.write(bytes(f'''(\n {chdir}\n env {env_assignments} {command}\n printf \"%04d%s\\n\" $? 
{end_of_message}\n )\n ''', encoding='utf8', errors='surrogateescape'))\n self.bash_stdin.flush()\n\n if capture_output:\n output_io: IO[bytes] = io.BytesIO()\n else:\n output_io = sys.stdout.buffer\n\n while True:\n line = self.bash_stdout.readline()\n\n if line.endswith(b'%s\\n' % (bytes(end_of_message, encoding='utf8'))):\n footer_offset = (\n len(line)\n - 1 # newline character\n - len(end_of_message) # delimiter\n - 4 # 4 returncode decimals\n )\n returncode_str = line[footer_offset:footer_offset+4]\n returncode = int(returncode_str)\n # add the last line to output, without the footer\n output_io.write(line[0:footer_offset])\n break\n else:\n output_io.write(line)\n\n if isinstance(output_io, io.BytesIO):\n output = str(output_io.getvalue(), encoding='utf8', errors='surrogateescape')\n else:\n output = ''\n\n if returncode != 0:\n raise subprocess.CalledProcessError(returncode, args, output)\n\n return output\n\n def get_environment(self) -> Dict[str, str]:\n return json.loads(self.call([\n self.UTILITY_PYTHON,\n '-c',\n 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'\n ], capture_output=True))\n\n def environment_executor(self, command: str, environment: Dict[str, str]) -> str:\n # used as an EnvironmentExecutor to evaluate commands and capture output\n return self.call(shlex.split(command), env=environment)\n\n\ndef shell_quote(path: PurePath) -> str:\n return shlex.quote(str(path))\n", "path": "cibuildwheel/docker_container.py"}], "after_files": [{"content": "import subprocess\nfrom typing import Callable, Dict, List, NamedTuple, Optional, Sequence\n\nimport bashlex # type: ignore\n\n# a function that takes a command and the environment, and returns the result\nEnvironmentExecutor = Callable[[List[str], Dict[str, str]], str]\n\n\ndef local_environment_executor(command: List[str], env: Dict[str, str]) -> str:\n return subprocess.check_output(command, env=env, universal_newlines=True)\n\n\nclass NodeExecutionContext(NamedTuple):\n environment: Dict[str, str]\n input: str\n executor: EnvironmentExecutor\n\n\ndef evaluate(value: str, environment: Dict[str, str], executor: Optional[EnvironmentExecutor] = None) -> str:\n if not value:\n # empty string evaluates to empty string\n # (but trips up bashlex)\n return ''\n\n command_node = bashlex.parsesingle(value)\n\n if len(command_node.parts) != 1:\n raise ValueError(f'\"{value}\" has too many parts')\n\n value_word_node = command_node.parts[0]\n\n return evaluate_node(\n value_word_node,\n context=NodeExecutionContext(environment=environment, input=value, executor=executor or local_environment_executor)\n )\n\n\ndef evaluate_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n if node.kind == 'word':\n return evaluate_word_node(node, context=context)\n elif node.kind == 'commandsubstitution':\n node_result = evaluate_command_node(node.command, context=context)\n # bash removes training newlines in command substitution\n return node_result.rstrip()\n elif node.kind == 'parameter':\n return evaluate_parameter_node(node, context=context)\n else:\n raise ValueError(f'Unsupported bash construct: \"{node.kind}\"')\n\n\ndef evaluate_word_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n value = node.word\n\n for part in node.parts:\n part_string = context.input[part.pos[0]:part.pos[1]]\n part_value = evaluate_node(part, context=context)\n\n if part_string not in value:\n raise RuntimeError(\n 'bash parse failed. part \"{}\" not found in \"{}\". Word was \"{}\". 
Full input was \"{}\"'.format(\n part_string, value, node.word, context.input,\n )\n )\n\n value = value.replace(part_string, part_value, 1)\n\n return value\n\n\ndef evaluate_command_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n if any(n.kind == 'operator' for n in node.parts):\n return evaluate_nodes_as_compound_command(node.parts, context=context)\n else:\n return evaluate_nodes_as_simple_command(node.parts, context=context)\n\n\ndef evaluate_nodes_as_compound_command(nodes: Sequence[bashlex.ast.node], context: NodeExecutionContext) -> str:\n # bashlex doesn't support any operators besides ';' inside command\n # substitutions, so we only need to handle that case. We do so assuming\n # that `set -o errexit` is on, because it's easier to code!\n\n result = ''\n for node in nodes:\n if node.kind == 'command':\n result += evaluate_command_node(node, context=context)\n elif node.kind == 'operator':\n if node.op == ';':\n pass\n else:\n raise ValueError(f'Unsupported bash operator: \"{node.op}\"')\n else:\n raise ValueError(f'Unsupported bash node in compound command: \"{node.kind}\"')\n\n return result\n\n\ndef evaluate_nodes_as_simple_command(nodes: List[bashlex.ast.node], context: NodeExecutionContext):\n command = [evaluate_node(part, context=context) for part in nodes]\n return context.executor(command, context.environment)\n\n\ndef evaluate_parameter_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n return context.environment.get(node.value, '')\n", "path": "cibuildwheel/bashlex_eval.py"}, {"content": "import io\nimport json\nimport os\nimport shlex\nimport subprocess\nimport sys\nimport uuid\nfrom os import PathLike\nfrom pathlib import Path, PurePath\nfrom typing import IO, Dict, List, Optional, Sequence, Union\n\n\nclass DockerContainer:\n '''\n An object that represents a running Docker container.\n\n Intended for use as a context manager e.g.\n `with DockerContainer('ubuntu') as docker:`\n\n A bash shell is running in the remote container. 
When `call()` is invoked,\n the command is relayed to the remote shell, and the results are streamed\n back to cibuildwheel.\n '''\n UTILITY_PYTHON = '/opt/python/cp38-cp38/bin/python'\n\n process: subprocess.Popen\n bash_stdin: IO[bytes]\n bash_stdout: IO[bytes]\n\n def __init__(self, docker_image: str, simulate_32_bit=False):\n self.docker_image = docker_image\n self.simulate_32_bit = simulate_32_bit\n\n def __enter__(self) -> 'DockerContainer':\n self.name = f'cibuildwheel-{uuid.uuid4()}'\n shell_args = ['linux32', '/bin/bash'] if self.simulate_32_bit else ['/bin/bash']\n subprocess.run(\n [\n 'docker', 'create',\n '--env', 'CIBUILDWHEEL',\n '--name', self.name,\n '-i',\n '-v', '/:/host', # ignored on CircleCI\n self.docker_image,\n *shell_args\n ],\n check=True,\n )\n self.process = subprocess.Popen(\n [\n 'docker', 'start',\n '--attach', '--interactive',\n self.name,\n ],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n\n assert self.process.stdin and self.process.stdout\n self.bash_stdin = self.process.stdin\n self.bash_stdout = self.process.stdout\n\n # run a noop command to block until the container is responding\n self.call(['/bin/true'])\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.bash_stdin.close()\n self.process.terminate()\n self.process.wait()\n\n subprocess.run(['docker', 'rm', '--force', '-v', self.name])\n self.name = None\n\n def copy_into(self, from_path: Path, to_path: PurePath) -> None:\n # `docker cp` causes 'no space left on device' error when\n # a container is running and the host filesystem is\n # mounted. https://github.com/moby/moby/issues/38995\n # Use `docker exec` instead.\n\n if from_path.is_dir():\n self.call(['mkdir', '-p', to_path])\n subprocess.run(\n f'tar cf - . | docker exec -i {self.name} tar -xC {shell_quote(to_path)} -f -',\n shell=True,\n check=True,\n cwd=from_path)\n else:\n subprocess.run(\n f'cat {shell_quote(from_path)} | docker exec -i {self.name} sh -c \"cat > {shell_quote(to_path)}\"',\n shell=True,\n check=True)\n\n def copy_out(self, from_path: PurePath, to_path: Path) -> None:\n # note: we assume from_path is a dir\n to_path.mkdir(parents=True, exist_ok=True)\n\n subprocess.run(\n f'docker exec -i {self.name} tar -cC {shell_quote(from_path)} -f - . | tar -xf -',\n shell=True,\n check=True,\n cwd=to_path\n )\n\n def glob(self, path: PurePath, pattern: str) -> List[PurePath]:\n glob_pattern = os.path.join(str(path), pattern)\n\n path_strs = json.loads(self.call([\n self.UTILITY_PYTHON,\n '-c',\n f'import sys, json, glob; json.dump(glob.glob({glob_pattern!r}), sys.stdout)'\n ], capture_output=True))\n\n return [PurePath(p) for p in path_strs]\n\n def call(self, args: Sequence[Union[str, PathLike]], env: Dict[str, str] = {},\n capture_output=False, cwd: Optional[Union[str, PathLike]] = None) -> str:\n chdir = f'cd {cwd}' if cwd else ''\n env_assignments = ' '.join(f'{shlex.quote(k)}={shlex.quote(v)}'\n for k, v in env.items())\n command = ' '.join(shlex.quote(str(a)) for a in args)\n end_of_message = str(uuid.uuid4())\n\n # log the command we're executing\n print(f' + {command}')\n\n # Write a command to the remote shell. First we change the\n # cwd, if that's required. Then, we use the `env` utility to run\n # `command` inside the specified environment. 
We use `env` because it\n # can cope with spaces and strange characters in the name or value.\n # Finally, the remote shell is told to write a footer - this will show\n # up in the output so we know when to stop reading, and will include\n # the returncode of `command`.\n self.bash_stdin.write(bytes(f'''(\n {chdir}\n env {env_assignments} {command}\n printf \"%04d%s\\n\" $? {end_of_message}\n )\n ''', encoding='utf8', errors='surrogateescape'))\n self.bash_stdin.flush()\n\n if capture_output:\n output_io: IO[bytes] = io.BytesIO()\n else:\n output_io = sys.stdout.buffer\n\n while True:\n line = self.bash_stdout.readline()\n\n if line.endswith(b'%s\\n' % (bytes(end_of_message, encoding='utf8'))):\n footer_offset = (\n len(line)\n - 1 # newline character\n - len(end_of_message) # delimiter\n - 4 # 4 returncode decimals\n )\n returncode_str = line[footer_offset:footer_offset+4]\n returncode = int(returncode_str)\n # add the last line to output, without the footer\n output_io.write(line[0:footer_offset])\n break\n else:\n output_io.write(line)\n\n if isinstance(output_io, io.BytesIO):\n output = str(output_io.getvalue(), encoding='utf8', errors='surrogateescape')\n else:\n output = ''\n\n if returncode != 0:\n raise subprocess.CalledProcessError(returncode, args, output)\n\n return output\n\n def get_environment(self) -> Dict[str, str]:\n return json.loads(self.call([\n self.UTILITY_PYTHON,\n '-c',\n 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'\n ], capture_output=True))\n\n def environment_executor(self, command: List[str], environment: Dict[str, str]) -> str:\n # used as an EnvironmentExecutor to evaluate commands and capture output\n return self.call(command, env=environment)\n\n\ndef shell_quote(path: PurePath) -> str:\n return shlex.quote(str(path))\n", "path": "cibuildwheel/docker_container.py"}]} | 3,666 | 496 |
gh_patches_debug_57019 | rasdani/github-patches | git_diff | fidals__shopelectro-415 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Убери пункт меню
Убери mp3 колонки из меню. Их больше не будет.
Я зашел в админку и выключил категорию. Надеюсь правильно )
http://prntscr.com/k553lt
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/templatetags/se_extras.py`
Content:
```
1 import datetime
2 import math
3
4 from django import template
5 from django.conf import settings
6 from django.contrib.humanize.templatetags.humanize import intcomma
7 from django.template.defaultfilters import floatformat
8 from django.urls import reverse
9
10 from images.models import ImageMixin
11 from pages.models import Page
12
13 from shopelectro import config
14 from shopelectro.models import Category
15
16 register = template.Library()
17
18
19 # TODO - move it in catalog. Inspired by lp_electric
20 @register.simple_tag
21 def roots():
22 return sorted(
23 Category.objects
24 .select_related('page')
25 .get_cached_trees(), # https://goo.gl/rFKiku
26 key=lambda x: x.page.position
27 )
28
29
30 @register.simple_tag
31 def footer_links():
32 return config.FOOTER_LINKS
33
34
35 # TODO - move in pages. Inspired by LP electric
36 @register.filter
37 def class_name(model):
38 """Return Model name."""
39 return type(model).__name__
40
41
42 @register.simple_tag
43 def time_to_call():
44 def is_weekend(t):
45 return t.weekday() > 4
46
47 def is_friday(t):
48 return t.weekday() == 4
49
50 def not_yet_opened(t):
51 current_time = (t.hour, t.minute)
52 open_time = (10, 00)
53 return current_time < open_time and not is_weekend(t)
54
55 def is_closed(t):
56 current_time = (t.hour, t.minute)
57 closing_time = (16, 30) if is_friday(t) else (17, 30)
58 return current_time > closing_time
59
60 when_we_call = {
61 lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): 'В понедельник в 10:30',
62 lambda now: not_yet_opened(now): 'Сегодня в 10:30',
63 lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): 'Завтра в 10:30',
64 lambda _: True: 'В течение 30 минут'
65 }
66
67 time_ = datetime.datetime.now()
68 call = ' позвонит менеджер и обсудит детали доставки.'
69 for condition, time in when_we_call.items():
70 if condition(time_):
71 return time + call
72
73
74 # TODO - move it in pages.
75 @register.simple_tag
76 def full_url(url_name, *args):
77 return settings.BASE_URL + reverse(url_name, args=args)
78
79
80 @register.filter
81 def humanize_price(price):
82 return intcomma(floatformat(price, 0))
83
84
85 # Not good code, but duker at 06/10/2016 don't know how to fix it.
86 # It makes Image model very complex.
87 @register.simple_tag
88 def get_img_alt(entity: ImageMixin):
89 product_alt = 'Фотография {}'
90 logo_alt = 'Логотип компании Shopelectro'
91
92 if not isinstance(entity, Page):
93 return logo_alt
94
95 # try one of this attributes to get pages name
96 name_attrs = ['h1', 'title', 'name']
97 entity_name = next(
98 getattr(entity, attr)
99 for attr in name_attrs
100 if getattr(entity, attr)
101 )
102 return product_alt.format(entity_name)
103
104
105 @register.simple_tag
106 def main_image_or_logo(page: Page):
107 """Used for microdata."""
108 if hasattr(page, 'main_image') and page.main_image:
109 return page.main_image.url
110 else:
111 return settings.STATIC_URL + 'images/logo.png'
112
113
114 @register.inclusion_tag('catalog/product_feedbacks_icons.html')
115 def icon_stars(rating=0):
116 """Render set of rating icons based on 1 through 5 rating values."""
117 full_icons = int(math.floor(rating))
118 half_icons = 0 if rating == int(rating) else 1
119 empty_icons = 5 - full_icons - half_icons
120
121 return {
122 'full_icons': range(full_icons),
123 'half_icons': range(half_icons),
124 'empty_icons': range(empty_icons),
125 }
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/templatetags/se_extras.py b/shopelectro/templatetags/se_extras.py
--- a/shopelectro/templatetags/se_extras.py
+++ b/shopelectro/templatetags/se_extras.py
@@ -20,10 +20,13 @@
@register.simple_tag
def roots():
return sorted(
- Category.objects
- .select_related('page')
- .get_cached_trees(), # https://goo.gl/rFKiku
- key=lambda x: x.page.position
+ filter(
+ lambda x: x.page.is_active,
+ Category.objects # https://goo.gl/rFKiku
+ .select_related('page')
+ .get_cached_trees()
+ ),
+ key=lambda x: x.page.position,
)
| {"golden_diff": "diff --git a/shopelectro/templatetags/se_extras.py b/shopelectro/templatetags/se_extras.py\n--- a/shopelectro/templatetags/se_extras.py\n+++ b/shopelectro/templatetags/se_extras.py\n@@ -20,10 +20,13 @@\n @register.simple_tag\n def roots():\n return sorted(\n- Category.objects\n- .select_related('page')\n- .get_cached_trees(), # https://goo.gl/rFKiku\n- key=lambda x: x.page.position\n+ filter(\n+ lambda x: x.page.is_active,\n+ Category.objects # https://goo.gl/rFKiku\n+ .select_related('page')\n+ .get_cached_trees()\n+ ),\n+ key=lambda x: x.page.position,\n )\n", "issue": "\u0423\u0431\u0435\u0440\u0438 \u043f\u0443\u043d\u043a\u0442 \u043c\u0435\u043d\u044e\n\u0423\u0431\u0435\u0440\u0438 mp3 \u043a\u043e\u043b\u043e\u043d\u043a\u0438 \u0438\u0437 \u043c\u0435\u043d\u044e. \u0418\u0445 \u0431\u043e\u043b\u044c\u0448\u0435 \u043d\u0435 \u0431\u0443\u0434\u0435\u0442.\r\n\u042f \u0437\u0430\u0448\u0435\u043b \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0443 \u0438 \u0432\u044b\u043a\u043b\u044e\u0447\u0438\u043b \u043a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044e. \u041d\u0430\u0434\u0435\u044e\u0441\u044c \u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u043e ) \r\nhttp://prntscr.com/k553lt\n", "before_files": [{"content": "import datetime\nimport math\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\n\nfrom images.models import ImageMixin\nfrom pages.models import Page\n\nfrom shopelectro import config\nfrom shopelectro.models import Category\n\nregister = template.Library()\n\n\n# TODO - move it in catalog. Inspired by lp_electric\[email protected]_tag\ndef roots():\n return sorted(\n Category.objects\n .select_related('page')\n .get_cached_trees(), # https://goo.gl/rFKiku\n key=lambda x: x.page.position\n )\n\n\[email protected]_tag\ndef footer_links():\n return config.FOOTER_LINKS\n\n\n# TODO - move in pages. 
Inspired by LP electric\[email protected]\ndef class_name(model):\n \"\"\"Return Model name.\"\"\"\n return type(model).__name__\n\n\[email protected]_tag\ndef time_to_call():\n def is_weekend(t):\n return t.weekday() > 4\n\n def is_friday(t):\n return t.weekday() == 4\n\n def not_yet_opened(t):\n current_time = (t.hour, t.minute)\n open_time = (10, 00)\n return current_time < open_time and not is_weekend(t)\n\n def is_closed(t):\n current_time = (t.hour, t.minute)\n closing_time = (16, 30) if is_friday(t) else (17, 30)\n return current_time > closing_time\n\n when_we_call = {\n lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): '\u0412 \u043f\u043e\u043d\u0435\u0434\u0435\u043b\u044c\u043d\u0438\u043a \u0432 10:30',\n lambda now: not_yet_opened(now): '\u0421\u0435\u0433\u043e\u0434\u043d\u044f \u0432 10:30',\n lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): '\u0417\u0430\u0432\u0442\u0440\u0430 \u0432 10:30',\n lambda _: True: '\u0412 \u0442\u0435\u0447\u0435\u043d\u0438\u0435 30 \u043c\u0438\u043d\u0443\u0442'\n }\n\n time_ = datetime.datetime.now()\n call = ' \u043f\u043e\u0437\u0432\u043e\u043d\u0438\u0442 \u043c\u0435\u043d\u0435\u0434\u0436\u0435\u0440 \u0438 \u043e\u0431\u0441\u0443\u0434\u0438\u0442 \u0434\u0435\u0442\u0430\u043b\u0438 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u0438.'\n for condition, time in when_we_call.items():\n if condition(time_):\n return time + call\n\n\n# TODO - move it in pages.\[email protected]_tag\ndef full_url(url_name, *args):\n return settings.BASE_URL + reverse(url_name, args=args)\n\n\[email protected]\ndef humanize_price(price):\n return intcomma(floatformat(price, 0))\n\n\n# Not good code, but duker at 06/10/2016 don't know how to fix it.\n# It makes Image model very complex.\[email protected]_tag\ndef get_img_alt(entity: ImageMixin):\n product_alt = '\u0424\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u044f {}'\n logo_alt = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n if not isinstance(entity, Page):\n return logo_alt\n\n # try one of this attributes to get pages name\n name_attrs = ['h1', 'title', 'name']\n entity_name = next(\n getattr(entity, attr)\n for attr in name_attrs\n if getattr(entity, attr)\n )\n return product_alt.format(entity_name)\n\n\[email protected]_tag\ndef main_image_or_logo(page: Page):\n \"\"\"Used for microdata.\"\"\"\n if hasattr(page, 'main_image') and page.main_image:\n return page.main_image.url\n else:\n return settings.STATIC_URL + 'images/logo.png'\n\n\[email protected]_tag('catalog/product_feedbacks_icons.html')\ndef icon_stars(rating=0):\n \"\"\"Render set of rating icons based on 1 through 5 rating values.\"\"\"\n full_icons = int(math.floor(rating))\n half_icons = 0 if rating == int(rating) else 1\n empty_icons = 5 - full_icons - half_icons\n\n return {\n 'full_icons': range(full_icons),\n 'half_icons': range(half_icons),\n 'empty_icons': range(empty_icons),\n }\n", "path": "shopelectro/templatetags/se_extras.py"}], "after_files": [{"content": "import datetime\nimport math\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\n\nfrom images.models import ImageMixin\nfrom pages.models import Page\n\nfrom shopelectro import config\nfrom shopelectro.models import Category\n\nregister = template.Library()\n\n\n# TODO - move it in catalog. 
Inspired by lp_electric\[email protected]_tag\ndef roots():\n return sorted(\n filter(\n lambda x: x.page.is_active,\n Category.objects # https://goo.gl/rFKiku\n .select_related('page')\n .get_cached_trees()\n ),\n key=lambda x: x.page.position,\n )\n\n\[email protected]_tag\ndef footer_links():\n return config.FOOTER_LINKS\n\n\n# TODO - move in pages. Inspired by LP electric\[email protected]\ndef class_name(model):\n \"\"\"Return Model name.\"\"\"\n return type(model).__name__\n\n\[email protected]_tag\ndef time_to_call():\n def is_weekend(t):\n return t.weekday() > 4\n\n def is_friday(t):\n return t.weekday() == 4\n\n def not_yet_opened(t):\n current_time = (t.hour, t.minute)\n open_time = (10, 00)\n return current_time < open_time and not is_weekend(t)\n\n def is_closed(t):\n current_time = (t.hour, t.minute)\n closing_time = (16, 30) if is_friday(t) else (17, 30)\n return current_time > closing_time\n\n when_we_call = {\n lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): '\u0412 \u043f\u043e\u043d\u0435\u0434\u0435\u043b\u044c\u043d\u0438\u043a \u0432 10:30',\n lambda now: not_yet_opened(now): '\u0421\u0435\u0433\u043e\u0434\u043d\u044f \u0432 10:30',\n lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): '\u0417\u0430\u0432\u0442\u0440\u0430 \u0432 10:30',\n lambda _: True: '\u0412 \u0442\u0435\u0447\u0435\u043d\u0438\u0435 30 \u043c\u0438\u043d\u0443\u0442'\n }\n\n time_ = datetime.datetime.now()\n call = ' \u043f\u043e\u0437\u0432\u043e\u043d\u0438\u0442 \u043c\u0435\u043d\u0435\u0434\u0436\u0435\u0440 \u0438 \u043e\u0431\u0441\u0443\u0434\u0438\u0442 \u0434\u0435\u0442\u0430\u043b\u0438 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u0438.'\n for condition, time in when_we_call.items():\n if condition(time_):\n return time + call\n\n\n# TODO - move it in pages.\[email protected]_tag\ndef full_url(url_name, *args):\n return settings.BASE_URL + reverse(url_name, args=args)\n\n\[email protected]\ndef humanize_price(price):\n return intcomma(floatformat(price, 0))\n\n\n# Not good code, but duker at 06/10/2016 don't know how to fix it.\n# It makes Image model very complex.\[email protected]_tag\ndef get_img_alt(entity: ImageMixin):\n product_alt = '\u0424\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u044f {}'\n logo_alt = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n if not isinstance(entity, Page):\n return logo_alt\n\n # try one of this attributes to get pages name\n name_attrs = ['h1', 'title', 'name']\n entity_name = next(\n getattr(entity, attr)\n for attr in name_attrs\n if getattr(entity, attr)\n )\n return product_alt.format(entity_name)\n\n\[email protected]_tag\ndef main_image_or_logo(page: Page):\n \"\"\"Used for microdata.\"\"\"\n if hasattr(page, 'main_image') and page.main_image:\n return page.main_image.url\n else:\n return settings.STATIC_URL + 'images/logo.png'\n\n\[email protected]_tag('catalog/product_feedbacks_icons.html')\ndef icon_stars(rating=0):\n \"\"\"Render set of rating icons based on 1 through 5 rating values.\"\"\"\n full_icons = int(math.floor(rating))\n half_icons = 0 if rating == int(rating) else 1\n empty_icons = 5 - full_icons - half_icons\n\n return {\n 'full_icons': range(full_icons),\n 'half_icons': range(half_icons),\n 'empty_icons': range(empty_icons),\n }\n", "path": "shopelectro/templatetags/se_extras.py"}]} | 1,508 | 188 |
gh_patches_debug_9311 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-3578 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PySide2 looking different from when running as a script.
Hi. I'm using the latest development version.
As you can see when I run my test script:
```
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.pushButton = QtWidgets.QPushButton(self.tab)
self.pushButton.setGeometry(QtCore.QRect(150, 90, 97, 34))
self.pushButton.setObjectName("pushButton")
self.tableWidget = QtWidgets.QTableWidget(self.tab)
self.tableWidget.setGeometry(QtCore.QRect(140, 150, 256, 192))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.tabWidget.addTab(self.tab_2, "")
self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 30))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "PushButton"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "New Column"))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "New Column"))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "New Column"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
```
from console:

and as the binary pyinstaller creates:

It looks very different (ignore the blue line; that's something that happened recently on this computer for some reason, but the problem existed before this). Perhaps this has something to do with the themes in qt5_plugins? Anyway, how can I fix this? Ideally it would be possible to bundle my theme "breeze dark" with the application so the user has the ability to use that.
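One likely culprit is that the frozen app is missing the Qt style plugins, so it falls back to a basic widget style instead of the desktop theme. A minimal sketch of the kind of hook addition that collects them (assuming the PySide2 hook keeps using `qt_plugins_binaries` as it does for the other plugin groups; 'styles' is the Qt plugin directory that holds widget styles):
```python
from PyInstaller.utils.hooks import qt_plugins_binaries

# Collect the Qt widget-style plugins alongside the other plugin groups
# so the bundled app can load the same style as the desktop session.
binaries = []
binaries.extend(qt_plugins_binaries('styles', namespace='PySide2'))
```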
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-PySide2.QtGui.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2018, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9
10 from PyInstaller.utils.hooks import qt_plugins_binaries
11 from PyInstaller.compat import is_linux
12
13 hiddenimports = ['PySide2.QtCore']
14
15 binaries = []
16 binaries.extend(qt_plugins_binaries('accessible', namespace='PySide2'))
17 binaries.extend(qt_plugins_binaries('iconengines', namespace='PySide2'))
18 binaries.extend(qt_plugins_binaries('imageformats', namespace='PySide2'))
19 binaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))
20 binaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))
21 binaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))
22
23 if is_linux:
24 binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-PySide2.QtGui.py b/PyInstaller/hooks/hook-PySide2.QtGui.py
--- a/PyInstaller/hooks/hook-PySide2.QtGui.py
+++ b/PyInstaller/hooks/hook-PySide2.QtGui.py
@@ -19,6 +19,7 @@
binaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))
binaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))
binaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))
+binaries.extend(qt_plugins_binaries('styles', namespace='PySide2'))
if is_linux:
binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-PySide2.QtGui.py b/PyInstaller/hooks/hook-PySide2.QtGui.py\n--- a/PyInstaller/hooks/hook-PySide2.QtGui.py\n+++ b/PyInstaller/hooks/hook-PySide2.QtGui.py\n@@ -19,6 +19,7 @@\n binaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))\n binaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))\n binaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))\n+binaries.extend(qt_plugins_binaries('styles', namespace='PySide2'))\n \n if is_linux:\n binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))\n", "issue": "Pyside2 looking different from when running as script.\nHi. Im using latest development version.\r\n\r\nAs you can see when i run my test script:\r\n```\r\nfrom PySide2 import QtCore, QtGui, QtWidgets\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(800, 600)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)\r\n self.gridLayout.setObjectName(\"gridLayout\")\r\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\r\n self.tabWidget.setObjectName(\"tabWidget\")\r\n self.tab = QtWidgets.QWidget()\r\n self.tab.setObjectName(\"tab\")\r\n self.pushButton = QtWidgets.QPushButton(self.tab)\r\n self.pushButton.setGeometry(QtCore.QRect(150, 90, 97, 34))\r\n self.pushButton.setObjectName(\"pushButton\")\r\n self.tableWidget = QtWidgets.QTableWidget(self.tab)\r\n self.tableWidget.setGeometry(QtCore.QRect(140, 150, 256, 192))\r\n self.tableWidget.setObjectName(\"tableWidget\")\r\n self.tableWidget.setColumnCount(3)\r\n self.tableWidget.setRowCount(0)\r\n item = QtWidgets.QTableWidgetItem()\r\n self.tableWidget.setHorizontalHeaderItem(0, item)\r\n item = QtWidgets.QTableWidgetItem()\r\n self.tableWidget.setHorizontalHeaderItem(1, item)\r\n item = QtWidgets.QTableWidgetItem()\r\n self.tableWidget.setHorizontalHeaderItem(2, item)\r\n self.tabWidget.addTab(self.tab, \"\")\r\n self.tab_2 = QtWidgets.QWidget()\r\n self.tab_2.setObjectName(\"tab_2\")\r\n self.tabWidget.addTab(self.tab_2, \"\")\r\n self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 30))\r\n self.menubar.setObjectName(\"menubar\")\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\r\n self.pushButton.setText(_translate(\"MainWindow\", \"PushButton\"))\r\n item = self.tableWidget.horizontalHeaderItem(0)\r\n item.setText(_translate(\"MainWindow\", \"New Column\"))\r\n item = self.tableWidget.horizontalHeaderItem(1)\r\n item.setText(_translate(\"MainWindow\", \"New Column\"))\r\n item = self.tableWidget.horizontalHeaderItem(2)\r\n item.setText(_translate(\"MainWindow\", \"New Column\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate(\"MainWindow\", \"Tab 1\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), 
_translate(\"MainWindow\", \"Tab 2\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n sys.exit(app.exec_())\r\n```\r\n\r\nfrom console:\r\n\r\nand as the binary pyinstaller creates:\r\n\r\n\r\nIt looks very different(ignore the blue line, that's something that happened recently on this comp for some reason but the problem existed previous to this). Perhaps this has something to do with the themes in qt5_plugins? Anyway, how can i fix this? Ideally it would be possible to bundle my theme \"breeze dark\" with the application so the user has the ability to use that.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import qt_plugins_binaries\nfrom PyInstaller.compat import is_linux\n\nhiddenimports = ['PySide2.QtCore']\n\nbinaries = []\nbinaries.extend(qt_plugins_binaries('accessible', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('iconengines', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('imageformats', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))\n\nif is_linux:\n binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))\n", "path": "PyInstaller/hooks/hook-PySide2.QtGui.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import qt_plugins_binaries\nfrom PyInstaller.compat import is_linux\n\nhiddenimports = ['PySide2.QtCore']\n\nbinaries = []\nbinaries.extend(qt_plugins_binaries('accessible', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('iconengines', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('imageformats', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('styles', namespace='PySide2'))\n\nif is_linux:\n binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))\n", "path": "PyInstaller/hooks/hook-PySide2.QtGui.py"}]} | 1,470 | 169 |
gh_patches_debug_5669 | rasdani/github-patches | git_diff | lutris__lutris-3705 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Store banners in .cache
They have no place to be in .local/share
Store banners in .cache
They have no place to be in .local/share
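A minimal sketch of the corresponding one-line change in `lutris/settings.py`, reusing the GLib cache-directory lookup the file already uses for `CACHE_DIR`:
```python
import os

from gi.repository import GLib

# Keep downloaded banners under the user cache directory, not user data.
CACHE_DIR = os.path.join(GLib.get_user_cache_dir(), "lutris")
BANNER_PATH = os.path.join(CACHE_DIR, "banners")
```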
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/settings.py`
Content:
```
1 """Internal settings."""
2 import os
3 import sys
4 from gettext import gettext as _
5
6 from gi.repository import GLib
7
8 from lutris import __version__
9 from lutris.util.settings import SettingsIO
10
11 PROJECT = "Lutris"
12 VERSION = __version__
13 COPYRIGHT = _("(c) 2010-2021 Lutris Team")
14 AUTHORS = [_("The Lutris team")]
15
16 # Paths
17 CONFIG_DIR = os.path.join(GLib.get_user_config_dir(), "lutris")
18 CONFIG_FILE = os.path.join(CONFIG_DIR, "lutris.conf")
19 DATA_DIR = os.path.join(GLib.get_user_data_dir(), "lutris")
20 RUNNER_DIR = os.path.join(DATA_DIR, "runners")
21 RUNTIME_DIR = os.path.join(DATA_DIR, "runtime")
22 CACHE_DIR = os.path.join(GLib.get_user_cache_dir(), "lutris")
23 GAME_CONFIG_DIR = os.path.join(CONFIG_DIR, "games")
24
25 TMP_PATH = os.path.join(CACHE_DIR, "tmp")
26 BANNER_PATH = os.path.join(DATA_DIR, "banners")
27 COVERART_PATH = os.path.join(DATA_DIR, "coverart")
28 ICON_PATH = os.path.join(GLib.get_user_data_dir(), "icons", "hicolor", "128x128", "apps")
29
30 sio = SettingsIO(CONFIG_FILE)
31 if "nosetests" in sys.argv[0] or "pytest" in sys.argv[0]:
32 PGA_DB = "/tmp/pga.db"
33 else:
34 PGA_DB = sio.read_setting("pga_path") or os.path.join(DATA_DIR, "pga.db")
35
36 SITE_URL = sio.read_setting("website") or "https://lutris.net"
37
38 DRIVER_HOWTO_URL = "https://github.com/lutris/docs/blob/master/InstallingDrivers.md"
39 INSTALLER_URL = SITE_URL + "/api/installers/%s"
40 # XXX change this, should query on the installer, not the game.
41 INSTALLER_REVISION_URL = SITE_URL + "/api/installers/games/%s/revisions/%s"
42 GAME_URL = SITE_URL + "/games/%s/"
43 RUNTIME_URL = SITE_URL + "/api/runtimes"
44
45 STEAM_API_KEY = sio.read_setting("steam_api_key") or "34C9698CEB394AB4401D65927C6B3752"
46 DISCORD_CLIENT_ID = sio.read_setting("discord_client_id") or "618290412402114570"
47
48
49 read_setting = sio.read_setting
50 write_setting = sio.write_setting
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/settings.py b/lutris/settings.py
--- a/lutris/settings.py
+++ b/lutris/settings.py
@@ -23,7 +23,7 @@
GAME_CONFIG_DIR = os.path.join(CONFIG_DIR, "games")
TMP_PATH = os.path.join(CACHE_DIR, "tmp")
-BANNER_PATH = os.path.join(DATA_DIR, "banners")
+BANNER_PATH = os.path.join(CACHE_DIR, "banners")
COVERART_PATH = os.path.join(DATA_DIR, "coverart")
ICON_PATH = os.path.join(GLib.get_user_data_dir(), "icons", "hicolor", "128x128", "apps")
| {"golden_diff": "diff --git a/lutris/settings.py b/lutris/settings.py\n--- a/lutris/settings.py\n+++ b/lutris/settings.py\n@@ -23,7 +23,7 @@\n GAME_CONFIG_DIR = os.path.join(CONFIG_DIR, \"games\")\n \n TMP_PATH = os.path.join(CACHE_DIR, \"tmp\")\n-BANNER_PATH = os.path.join(DATA_DIR, \"banners\")\n+BANNER_PATH = os.path.join(CACHE_DIR, \"banners\")\n COVERART_PATH = os.path.join(DATA_DIR, \"coverart\")\n ICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", \"hicolor\", \"128x128\", \"apps\")\n", "issue": "Store banners in .cache\nThey have no place to be in .local/share\nStore banners in .cache\nThey have no place to be in .local/share\n", "before_files": [{"content": "\"\"\"Internal settings.\"\"\"\nimport os\nimport sys\nfrom gettext import gettext as _\n\nfrom gi.repository import GLib\n\nfrom lutris import __version__\nfrom lutris.util.settings import SettingsIO\n\nPROJECT = \"Lutris\"\nVERSION = __version__\nCOPYRIGHT = _(\"(c) 2010-2021 Lutris Team\")\nAUTHORS = [_(\"The Lutris team\")]\n\n# Paths\nCONFIG_DIR = os.path.join(GLib.get_user_config_dir(), \"lutris\")\nCONFIG_FILE = os.path.join(CONFIG_DIR, \"lutris.conf\")\nDATA_DIR = os.path.join(GLib.get_user_data_dir(), \"lutris\")\nRUNNER_DIR = os.path.join(DATA_DIR, \"runners\")\nRUNTIME_DIR = os.path.join(DATA_DIR, \"runtime\")\nCACHE_DIR = os.path.join(GLib.get_user_cache_dir(), \"lutris\")\nGAME_CONFIG_DIR = os.path.join(CONFIG_DIR, \"games\")\n\nTMP_PATH = os.path.join(CACHE_DIR, \"tmp\")\nBANNER_PATH = os.path.join(DATA_DIR, \"banners\")\nCOVERART_PATH = os.path.join(DATA_DIR, \"coverart\")\nICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", \"hicolor\", \"128x128\", \"apps\")\n\nsio = SettingsIO(CONFIG_FILE)\nif \"nosetests\" in sys.argv[0] or \"pytest\" in sys.argv[0]:\n PGA_DB = \"/tmp/pga.db\"\nelse:\n PGA_DB = sio.read_setting(\"pga_path\") or os.path.join(DATA_DIR, \"pga.db\")\n\nSITE_URL = sio.read_setting(\"website\") or \"https://lutris.net\"\n\nDRIVER_HOWTO_URL = \"https://github.com/lutris/docs/blob/master/InstallingDrivers.md\"\nINSTALLER_URL = SITE_URL + \"/api/installers/%s\"\n# XXX change this, should query on the installer, not the game.\nINSTALLER_REVISION_URL = SITE_URL + \"/api/installers/games/%s/revisions/%s\"\nGAME_URL = SITE_URL + \"/games/%s/\"\nRUNTIME_URL = SITE_URL + \"/api/runtimes\"\n\nSTEAM_API_KEY = sio.read_setting(\"steam_api_key\") or \"34C9698CEB394AB4401D65927C6B3752\"\nDISCORD_CLIENT_ID = sio.read_setting(\"discord_client_id\") or \"618290412402114570\"\n\n\nread_setting = sio.read_setting\nwrite_setting = sio.write_setting\n", "path": "lutris/settings.py"}], "after_files": [{"content": "\"\"\"Internal settings.\"\"\"\nimport os\nimport sys\nfrom gettext import gettext as _\n\nfrom gi.repository import GLib\n\nfrom lutris import __version__\nfrom lutris.util.settings import SettingsIO\n\nPROJECT = \"Lutris\"\nVERSION = __version__\nCOPYRIGHT = _(\"(c) 2010-2021 Lutris Team\")\nAUTHORS = [_(\"The Lutris team\")]\n\n# Paths\nCONFIG_DIR = os.path.join(GLib.get_user_config_dir(), \"lutris\")\nCONFIG_FILE = os.path.join(CONFIG_DIR, \"lutris.conf\")\nDATA_DIR = os.path.join(GLib.get_user_data_dir(), \"lutris\")\nRUNNER_DIR = os.path.join(DATA_DIR, \"runners\")\nRUNTIME_DIR = os.path.join(DATA_DIR, \"runtime\")\nCACHE_DIR = os.path.join(GLib.get_user_cache_dir(), \"lutris\")\nGAME_CONFIG_DIR = os.path.join(CONFIG_DIR, \"games\")\n\nTMP_PATH = os.path.join(CACHE_DIR, \"tmp\")\nBANNER_PATH = os.path.join(CACHE_DIR, \"banners\")\nCOVERART_PATH = os.path.join(DATA_DIR, 
\"coverart\")\nICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", \"hicolor\", \"128x128\", \"apps\")\n\nsio = SettingsIO(CONFIG_FILE)\nif \"nosetests\" in sys.argv[0] or \"pytest\" in sys.argv[0]:\n PGA_DB = \"/tmp/pga.db\"\nelse:\n PGA_DB = sio.read_setting(\"pga_path\") or os.path.join(DATA_DIR, \"pga.db\")\n\nSITE_URL = sio.read_setting(\"website\") or \"https://lutris.net\"\n\nDRIVER_HOWTO_URL = \"https://github.com/lutris/docs/blob/master/InstallingDrivers.md\"\nINSTALLER_URL = SITE_URL + \"/api/installers/%s\"\n# XXX change this, should query on the installer, not the game.\nINSTALLER_REVISION_URL = SITE_URL + \"/api/installers/games/%s/revisions/%s\"\nGAME_URL = SITE_URL + \"/games/%s/\"\nRUNTIME_URL = SITE_URL + \"/api/runtimes\"\n\nSTEAM_API_KEY = sio.read_setting(\"steam_api_key\") or \"34C9698CEB394AB4401D65927C6B3752\"\nDISCORD_CLIENT_ID = sio.read_setting(\"discord_client_id\") or \"618290412402114570\"\n\n\nread_setting = sio.read_setting\nwrite_setting = sio.write_setting\n", "path": "lutris/settings.py"}]} | 936 | 146 |
gh_patches_debug_804 | rasdani/github-patches | git_diff | scikit-hep__awkward-1830 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`ak.fill_none(axis=None)` does nothing
### Version of Awkward Array
main
### Description and code to reproduce
The `apply` function for this case does not return (or recurse)
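In other words, the `axis=None` branch builds an `action` callback that only assigns to a local variable and falls through, so `recursively_apply` never receives a result. A minimal sketch of a callback that does return, using the `continuation` argument that `recursively_apply` passes to actions (`maybe_fillna` is the helper already defined in `ak_fill_none.py`):
```python
def action(layout, continuation, **kwargs):
    # Recurse into the children first, then fill any option-type node
    # in the rebuilt layout that comes back.
    return maybe_fillna(continuation())
```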
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/awkward/operations/ak_fill_none.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 import numbers
4
5 import awkward as ak
6
7 np = ak.nplikes.NumpyMetadata.instance()
8
9
10 def fill_none(array, value, axis=-1, highlevel=True, behavior=None):
11 """
12 Args:
13 array: Data in which to replace None with a given value.
14 value: Data with which to replace None.
15 axis (None or int): If None, replace all None values in the array
16 with the given value; if an int, The dimension at which this
17 operation is applied. The outermost dimension is `0`, followed
18 by `1`, etc., and negative values count backward from the
19 innermost: `-1` is the innermost dimension, `-2` is the next
20 level up, etc.
21 highlevel (bool): If True, return an #ak.Array; otherwise, return
22 a low-level #ak.contents.Content subclass.
23 behavior (None or dict): Custom #ak.behavior for the output array, if
24 high-level.
25
26 Replaces missing values (None) with a given `value`.
27
28 For example, in the following `array`,
29
30 ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])
31
32 The None values could be replaced with `0` by
33
34 >>> ak.fill_none(array, 0)
35 <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>
36
37 The replacement value doesn't strictly need the same type as the
38 surrounding data. For example, the None values could also be replaced
39 by a string.
40
41 >>> ak.fill_none(array, "hi")
42 <Array [[1.1, 'hi', 2.2], ... ['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'>
43
44 The list content now has a union type:
45
46 >>> ak.type(ak.fill_none(array, "hi"))
47 3 * var * union[float64, string]
48
49 The values could be floating-point numbers or strings.
50 """
51 with ak._errors.OperationErrorContext(
52 "ak.fill_none",
53 dict(
54 array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior
55 ),
56 ):
57 return _impl(array, value, axis, highlevel, behavior)
58
59
60 def _impl(array, value, axis, highlevel, behavior):
61 arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)
62 nplike = ak.nplikes.nplike_of(arraylayout)
63
64 # Convert value type to appropriate layout
65 if (
66 isinstance(value, np.ndarray)
67 and issubclass(value.dtype.type, (np.bool_, np.number))
68 and len(value.shape) != 0
69 ):
70 valuelayout = ak.operations.to_layout(
71 nplike.asarray(value)[np.newaxis], allow_record=False, allow_other=False
72 )
73 elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (
74 isinstance(value, np.ndarray)
75 and issubclass(value.dtype.type, (np.bool_, np.number))
76 ):
77 valuelayout = ak.operations.to_layout(
78 nplike.asarray(value), allow_record=False, allow_other=False
79 )
80 elif (
81 ak._util.is_sized_iterable(value)
82 and not (isinstance(value, (str, bytes)))
83 or isinstance(value, (ak.highlevel.Record, ak.record.Record))
84 ):
85 valuelayout = ak.operations.to_layout(
86 value, allow_record=True, allow_other=False
87 )
88 if isinstance(valuelayout, ak.record.Record):
89 valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]
90 elif len(valuelayout) == 0:
91 offsets = ak.index.Index64(
92 nplike.array([0, 0], dtype=np.int64), nplike=nplike
93 )
94 valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)
95 else:
96 valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)
97 else:
98 valuelayout = ak.operations.to_layout(
99 [value], allow_record=False, allow_other=False
100 )
101
102 def maybe_fillna(layout):
103 if layout.is_OptionType:
104 return layout.fill_none(valuelayout)
105 else:
106 return layout
107
108 if axis is None:
109
110 def action(layout, depth, depth_context, **kwargs):
111 layout = maybe_fillna(layout)
112
113 else:
114
115 def action(layout, depth, depth_context, **kwargs):
116 posaxis = layout.axis_wrap_if_negative(depth_context["posaxis"])
117 depth_context["posaxis"] = posaxis
118 if posaxis + 1 < depth:
119 return layout
120 elif posaxis + 1 == depth:
121 return maybe_fillna(layout)
122
123 depth_context = {"posaxis": axis}
124 out = arraylayout.recursively_apply(action, behavior, depth_context=depth_context)
125
126 return ak._util.wrap(out, ak._util.behavior_of(array, behavior=behavior), highlevel)
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py
--- a/src/awkward/operations/ak_fill_none.py
+++ b/src/awkward/operations/ak_fill_none.py
@@ -107,8 +107,8 @@
if axis is None:
- def action(layout, depth, depth_context, **kwargs):
- layout = maybe_fillna(layout)
+ def action(layout, continuation, **kwargs):
+ return maybe_fillna(continuation())
else:
| {"golden_diff": "diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py\n--- a/src/awkward/operations/ak_fill_none.py\n+++ b/src/awkward/operations/ak_fill_none.py\n@@ -107,8 +107,8 @@\n \n if axis is None:\n \n- def action(layout, depth, depth_context, **kwargs):\n- layout = maybe_fillna(layout)\n+ def action(layout, continuation, **kwargs):\n+ return maybe_fillna(continuation())\n \n else:\n", "issue": "`ak.fill_none(axis=None)` does nothing\n### Version of Awkward Array\r\n\r\nmain\r\n\r\n### Description and code to reproduce\r\n\r\nThe `apply` function for this case does not return (or recurse)\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef fill_none(array, value, axis=-1, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Data in which to replace None with a given value.\n value: Data with which to replace None.\n axis (None or int): If None, replace all None values in the array\n with the given value; if an int, The dimension at which this\n operation is applied. The outermost dimension is `0`, followed\n by `1`, etc., and negative values count backward from the\n innermost: `-1` is the innermost dimension, `-2` is the next\n level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Replaces missing values (None) with a given `value`.\n\n For example, in the following `array`,\n\n ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])\n\n The None values could be replaced with `0` by\n\n >>> ak.fill_none(array, 0)\n <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>\n\n The replacement value doesn't strictly need the same type as the\n surrounding data. For example, the None values could also be replaced\n by a string.\n\n >>> ak.fill_none(array, \"hi\")\n <Array [[1.1, 'hi', 2.2], ... 
['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'>\n\n The list content now has a union type:\n\n >>> ak.type(ak.fill_none(array, \"hi\"))\n 3 * var * union[float64, string]\n\n The values could be floating-point numbers or strings.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.fill_none\",\n dict(\n array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior\n ),\n ):\n return _impl(array, value, axis, highlevel, behavior)\n\n\ndef _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n nplike = ak.nplikes.nplike_of(arraylayout)\n\n # Convert value type to appropriate layout\n if (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n and len(value.shape) != 0\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value)[np.newaxis], allow_record=False, allow_other=False\n )\n elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value), allow_record=False, allow_other=False\n )\n elif (\n ak._util.is_sized_iterable(value)\n and not (isinstance(value, (str, bytes)))\n or isinstance(value, (ak.highlevel.Record, ak.record.Record))\n ):\n valuelayout = ak.operations.to_layout(\n value, allow_record=True, allow_other=False\n )\n if isinstance(valuelayout, ak.record.Record):\n valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]\n elif len(valuelayout) == 0:\n offsets = ak.index.Index64(\n nplike.array([0, 0], dtype=np.int64), nplike=nplike\n )\n valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)\n else:\n valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)\n else:\n valuelayout = ak.operations.to_layout(\n [value], allow_record=False, allow_other=False\n )\n\n def maybe_fillna(layout):\n if layout.is_OptionType:\n return layout.fill_none(valuelayout)\n else:\n return layout\n\n if axis is None:\n\n def action(layout, depth, depth_context, **kwargs):\n layout = maybe_fillna(layout)\n\n else:\n\n def action(layout, depth, depth_context, **kwargs):\n posaxis = layout.axis_wrap_if_negative(depth_context[\"posaxis\"])\n depth_context[\"posaxis\"] = posaxis\n if posaxis + 1 < depth:\n return layout\n elif posaxis + 1 == depth:\n return maybe_fillna(layout)\n\n depth_context = {\"posaxis\": axis}\n out = arraylayout.recursively_apply(action, behavior, depth_context=depth_context)\n\n return ak._util.wrap(out, ak._util.behavior_of(array, behavior=behavior), highlevel)\n", "path": "src/awkward/operations/ak_fill_none.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef fill_none(array, value, axis=-1, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Data in which to replace None with a given value.\n value: Data with which to replace None.\n axis (None or int): If None, replace all None values in the array\n with the given value; if an int, The dimension at which this\n operation is applied. 
The outermost dimension is `0`, followed\n by `1`, etc., and negative values count backward from the\n innermost: `-1` is the innermost dimension, `-2` is the next\n level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Replaces missing values (None) with a given `value`.\n\n For example, in the following `array`,\n\n ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])\n\n The None values could be replaced with `0` by\n\n >>> ak.fill_none(array, 0)\n <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>\n\n The replacement value doesn't strictly need the same type as the\n surrounding data. For example, the None values could also be replaced\n by a string.\n\n >>> ak.fill_none(array, \"hi\")\n <Array [[1.1, 'hi', 2.2], ... ['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'>\n\n The list content now has a union type:\n\n >>> ak.type(ak.fill_none(array, \"hi\"))\n 3 * var * union[float64, string]\n\n The values could be floating-point numbers or strings.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.fill_none\",\n dict(\n array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior\n ),\n ):\n return _impl(array, value, axis, highlevel, behavior)\n\n\ndef _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n nplike = ak.nplikes.nplike_of(arraylayout)\n\n # Convert value type to appropriate layout\n if (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n and len(value.shape) != 0\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value)[np.newaxis], allow_record=False, allow_other=False\n )\n elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value), allow_record=False, allow_other=False\n )\n elif (\n ak._util.is_sized_iterable(value)\n and not (isinstance(value, (str, bytes)))\n or isinstance(value, (ak.highlevel.Record, ak.record.Record))\n ):\n valuelayout = ak.operations.to_layout(\n value, allow_record=True, allow_other=False\n )\n if isinstance(valuelayout, ak.record.Record):\n valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]\n elif len(valuelayout) == 0:\n offsets = ak.index.Index64(\n nplike.array([0, 0], dtype=np.int64), nplike=nplike\n )\n valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)\n else:\n valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)\n else:\n valuelayout = ak.operations.to_layout(\n [value], allow_record=False, allow_other=False\n )\n\n def maybe_fillna(layout):\n if layout.is_OptionType:\n return layout.fill_none(valuelayout)\n else:\n return layout\n\n if axis is None:\n\n def action(layout, continuation, **kwargs):\n return maybe_fillna(continuation())\n\n else:\n\n def action(layout, depth, depth_context, **kwargs):\n posaxis = layout.axis_wrap_if_negative(depth_context[\"posaxis\"])\n depth_context[\"posaxis\"] = posaxis\n if posaxis + 1 < depth:\n return layout\n elif posaxis + 1 == depth:\n return maybe_fillna(layout)\n\n depth_context = {\"posaxis\": axis}\n out = arraylayout.recursively_apply(action, behavior, depth_context=depth_context)\n\n return ak._util.wrap(out, ak._util.behavior_of(array, 
behavior=behavior), highlevel)\n", "path": "src/awkward/operations/ak_fill_none.py"}]} | 1,742 | 126 |
gh_patches_debug_36171 | rasdani/github-patches | git_diff | pandas-dev__pandas-28230 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ASV Benchmark for read_excel
Right now we don't benchmark the performance of read_excel across the various engines (xlrd, openpyxl and odfpy), so we would gladly accept contributions to improve benchmark coverage on those.
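A minimal sketch of what a parametrized read benchmark could look like (the class name and fixture file name are illustrative; `odf` is left out because it would need a separate `.ods` fixture written with odfpy):
```python
import numpy as np

from pandas import DataFrame, date_range, read_excel
import pandas.util.testing as tm


class ReadExcel:
    # asv runs time_* methods once per parameter value.
    params = ["xlrd", "openpyxl"]
    param_names = ["engine"]
    fname = "spreadsheet.xlsx"

    def setup_cache(self):
        N, C = 2000, 5
        df = DataFrame(
            np.random.randn(N, C),
            columns=["float{}".format(i) for i in range(C)],
            index=date_range("20000101", periods=N, freq="H"),
        )
        df["object"] = tm.makeStringIndex(N)
        df.to_excel(self.fname, sheet_name="Sheet1")

    def time_read_excel(self, engine):
        read_excel(self.fname, engine=engine)
```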
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `asv_bench/benchmarks/io/excel.py`
Content:
```
1 from io import BytesIO
2
3 import numpy as np
4
5 from pandas import DataFrame, ExcelWriter, date_range, read_excel
6 import pandas.util.testing as tm
7
8
9 class Excel:
10
11 params = ["openpyxl", "xlsxwriter", "xlwt"]
12 param_names = ["engine"]
13
14 def setup(self, engine):
15 N = 2000
16 C = 5
17 self.df = DataFrame(
18 np.random.randn(N, C),
19 columns=["float{}".format(i) for i in range(C)],
20 index=date_range("20000101", periods=N, freq="H"),
21 )
22 self.df["object"] = tm.makeStringIndex(N)
23 self.bio_read = BytesIO()
24 self.writer_read = ExcelWriter(self.bio_read, engine=engine)
25 self.df.to_excel(self.writer_read, sheet_name="Sheet1")
26 self.writer_read.save()
27 self.bio_read.seek(0)
28
29 def time_read_excel(self, engine):
30 read_excel(self.bio_read)
31
32 def time_write_excel(self, engine):
33 bio_write = BytesIO()
34 bio_write.seek(0)
35 writer_write = ExcelWriter(bio_write, engine=engine)
36 self.df.to_excel(writer_write, sheet_name="Sheet1")
37 writer_write.save()
38
39
40 from ..pandas_vb_common import setup # noqa: F401 isort:skip
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -1,40 +1,72 @@
from io import BytesIO
import numpy as np
+from odf.opendocument import OpenDocumentSpreadsheet
+from odf.table import Table, TableCell, TableRow
+from odf.text import P
from pandas import DataFrame, ExcelWriter, date_range, read_excel
import pandas.util.testing as tm
-class Excel:
+def _generate_dataframe():
+ N = 2000
+ C = 5
+ df = DataFrame(
+ np.random.randn(N, C),
+ columns=["float{}".format(i) for i in range(C)],
+ index=date_range("20000101", periods=N, freq="H"),
+ )
+ df["object"] = tm.makeStringIndex(N)
+ return df
+
+
+class WriteExcel:
params = ["openpyxl", "xlsxwriter", "xlwt"]
param_names = ["engine"]
def setup(self, engine):
- N = 2000
- C = 5
- self.df = DataFrame(
- np.random.randn(N, C),
- columns=["float{}".format(i) for i in range(C)],
- index=date_range("20000101", periods=N, freq="H"),
- )
- self.df["object"] = tm.makeStringIndex(N)
- self.bio_read = BytesIO()
- self.writer_read = ExcelWriter(self.bio_read, engine=engine)
- self.df.to_excel(self.writer_read, sheet_name="Sheet1")
- self.writer_read.save()
- self.bio_read.seek(0)
-
- def time_read_excel(self, engine):
- read_excel(self.bio_read)
+ self.df = _generate_dataframe()
def time_write_excel(self, engine):
- bio_write = BytesIO()
- bio_write.seek(0)
- writer_write = ExcelWriter(bio_write, engine=engine)
- self.df.to_excel(writer_write, sheet_name="Sheet1")
- writer_write.save()
+ bio = BytesIO()
+ bio.seek(0)
+ writer = ExcelWriter(bio, engine=engine)
+ self.df.to_excel(writer, sheet_name="Sheet1")
+ writer.save()
+
+
+class ReadExcel:
+
+ params = ["xlrd", "openpyxl", "odf"]
+ param_names = ["engine"]
+ fname_excel = "spreadsheet.xlsx"
+ fname_odf = "spreadsheet.ods"
+
+ def _create_odf(self):
+ doc = OpenDocumentSpreadsheet()
+ table = Table(name="Table1")
+ for row in self.df.values:
+ tr = TableRow()
+ for val in row:
+ tc = TableCell(valuetype="string")
+ tc.addElement(P(text=val))
+ tr.addElement(tc)
+ table.addElement(tr)
+
+ doc.spreadsheet.addElement(table)
+ doc.save(self.fname_odf)
+
+ def setup_cache(self):
+ self.df = _generate_dataframe()
+
+ self.df.to_excel(self.fname_excel, sheet_name="Sheet1")
+ self._create_odf()
+
+ def time_read_excel(self, engine):
+ fname = self.fname_odf if engine == "odf" else self.fname_excel
+ read_excel(fname, engine=engine)
from ..pandas_vb_common import setup # noqa: F401 isort:skip
| {"golden_diff": "diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py\n--- a/asv_bench/benchmarks/io/excel.py\n+++ b/asv_bench/benchmarks/io/excel.py\n@@ -1,40 +1,72 @@\n from io import BytesIO\n \n import numpy as np\n+from odf.opendocument import OpenDocumentSpreadsheet\n+from odf.table import Table, TableCell, TableRow\n+from odf.text import P\n \n from pandas import DataFrame, ExcelWriter, date_range, read_excel\n import pandas.util.testing as tm\n \n \n-class Excel:\n+def _generate_dataframe():\n+ N = 2000\n+ C = 5\n+ df = DataFrame(\n+ np.random.randn(N, C),\n+ columns=[\"float{}\".format(i) for i in range(C)],\n+ index=date_range(\"20000101\", periods=N, freq=\"H\"),\n+ )\n+ df[\"object\"] = tm.makeStringIndex(N)\n+ return df\n+\n+\n+class WriteExcel:\n \n params = [\"openpyxl\", \"xlsxwriter\", \"xlwt\"]\n param_names = [\"engine\"]\n \n def setup(self, engine):\n- N = 2000\n- C = 5\n- self.df = DataFrame(\n- np.random.randn(N, C),\n- columns=[\"float{}\".format(i) for i in range(C)],\n- index=date_range(\"20000101\", periods=N, freq=\"H\"),\n- )\n- self.df[\"object\"] = tm.makeStringIndex(N)\n- self.bio_read = BytesIO()\n- self.writer_read = ExcelWriter(self.bio_read, engine=engine)\n- self.df.to_excel(self.writer_read, sheet_name=\"Sheet1\")\n- self.writer_read.save()\n- self.bio_read.seek(0)\n-\n- def time_read_excel(self, engine):\n- read_excel(self.bio_read)\n+ self.df = _generate_dataframe()\n \n def time_write_excel(self, engine):\n- bio_write = BytesIO()\n- bio_write.seek(0)\n- writer_write = ExcelWriter(bio_write, engine=engine)\n- self.df.to_excel(writer_write, sheet_name=\"Sheet1\")\n- writer_write.save()\n+ bio = BytesIO()\n+ bio.seek(0)\n+ writer = ExcelWriter(bio, engine=engine)\n+ self.df.to_excel(writer, sheet_name=\"Sheet1\")\n+ writer.save()\n+\n+\n+class ReadExcel:\n+\n+ params = [\"xlrd\", \"openpyxl\", \"odf\"]\n+ param_names = [\"engine\"]\n+ fname_excel = \"spreadsheet.xlsx\"\n+ fname_odf = \"spreadsheet.ods\"\n+\n+ def _create_odf(self):\n+ doc = OpenDocumentSpreadsheet()\n+ table = Table(name=\"Table1\")\n+ for row in self.df.values:\n+ tr = TableRow()\n+ for val in row:\n+ tc = TableCell(valuetype=\"string\")\n+ tc.addElement(P(text=val))\n+ tr.addElement(tc)\n+ table.addElement(tr)\n+\n+ doc.spreadsheet.addElement(table)\n+ doc.save(self.fname_odf)\n+\n+ def setup_cache(self):\n+ self.df = _generate_dataframe()\n+\n+ self.df.to_excel(self.fname_excel, sheet_name=\"Sheet1\")\n+ self._create_odf()\n+\n+ def time_read_excel(self, engine):\n+ fname = self.fname_odf if engine == \"odf\" else self.fname_excel\n+ read_excel(fname, engine=engine)\n \n \n from ..pandas_vb_common import setup # noqa: F401 isort:skip\n", "issue": "ASV Benchmark for read_excel\nRight now we don't benchmark performance of read_excel across the various engines (xlrd, openpyxl and odfpy) so would gladly accept contributions to improve benchmark coverage on those\n", "before_files": [{"content": "from io import BytesIO\n\nimport numpy as np\n\nfrom pandas import DataFrame, ExcelWriter, date_range, read_excel\nimport pandas.util.testing as tm\n\n\nclass Excel:\n\n params = [\"openpyxl\", \"xlsxwriter\", \"xlwt\"]\n param_names = [\"engine\"]\n\n def setup(self, engine):\n N = 2000\n C = 5\n self.df = DataFrame(\n np.random.randn(N, C),\n columns=[\"float{}\".format(i) for i in range(C)],\n index=date_range(\"20000101\", periods=N, freq=\"H\"),\n )\n self.df[\"object\"] = tm.makeStringIndex(N)\n self.bio_read = BytesIO()\n self.writer_read = 
ExcelWriter(self.bio_read, engine=engine)\n self.df.to_excel(self.writer_read, sheet_name=\"Sheet1\")\n self.writer_read.save()\n self.bio_read.seek(0)\n\n def time_read_excel(self, engine):\n read_excel(self.bio_read)\n\n def time_write_excel(self, engine):\n bio_write = BytesIO()\n bio_write.seek(0)\n writer_write = ExcelWriter(bio_write, engine=engine)\n self.df.to_excel(writer_write, sheet_name=\"Sheet1\")\n writer_write.save()\n\n\nfrom ..pandas_vb_common import setup # noqa: F401 isort:skip\n", "path": "asv_bench/benchmarks/io/excel.py"}], "after_files": [{"content": "from io import BytesIO\n\nimport numpy as np\nfrom odf.opendocument import OpenDocumentSpreadsheet\nfrom odf.table import Table, TableCell, TableRow\nfrom odf.text import P\n\nfrom pandas import DataFrame, ExcelWriter, date_range, read_excel\nimport pandas.util.testing as tm\n\n\ndef _generate_dataframe():\n N = 2000\n C = 5\n df = DataFrame(\n np.random.randn(N, C),\n columns=[\"float{}\".format(i) for i in range(C)],\n index=date_range(\"20000101\", periods=N, freq=\"H\"),\n )\n df[\"object\"] = tm.makeStringIndex(N)\n return df\n\n\nclass WriteExcel:\n\n params = [\"openpyxl\", \"xlsxwriter\", \"xlwt\"]\n param_names = [\"engine\"]\n\n def setup(self, engine):\n self.df = _generate_dataframe()\n\n def time_write_excel(self, engine):\n bio = BytesIO()\n bio.seek(0)\n writer = ExcelWriter(bio, engine=engine)\n self.df.to_excel(writer, sheet_name=\"Sheet1\")\n writer.save()\n\n\nclass ReadExcel:\n\n params = [\"xlrd\", \"openpyxl\", \"odf\"]\n param_names = [\"engine\"]\n fname_excel = \"spreadsheet.xlsx\"\n fname_odf = \"spreadsheet.ods\"\n\n def _create_odf(self):\n doc = OpenDocumentSpreadsheet()\n table = Table(name=\"Table1\")\n for row in self.df.values:\n tr = TableRow()\n for val in row:\n tc = TableCell(valuetype=\"string\")\n tc.addElement(P(text=val))\n tr.addElement(tc)\n table.addElement(tr)\n\n doc.spreadsheet.addElement(table)\n doc.save(self.fname_odf)\n\n def setup_cache(self):\n self.df = _generate_dataframe()\n\n self.df.to_excel(self.fname_excel, sheet_name=\"Sheet1\")\n self._create_odf()\n\n def time_read_excel(self, engine):\n fname = self.fname_odf if engine == \"odf\" else self.fname_excel\n read_excel(fname, engine=engine)\n\n\nfrom ..pandas_vb_common import setup # noqa: F401 isort:skip\n", "path": "asv_bench/benchmarks/io/excel.py"}]} | 683 | 812 |
gh_patches_debug_27454 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4530 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CloudFormation EC2Credentials.py does evaluate if the UserData is base64 encoded
**Describe the issue**
The check EC2Credentials.py, which scans the UserData of a CloudFormation template for secrets, does not work if the user data is already base64-encoded.
The check is not returning the correct result and the processing time is very slow.
**Examples**
```yaml
AWSTemplateFormatVersion: "2010-09-09"
Resources:
Resource0:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-04169656fea786776
UserData: IyEgL2Jpbi9iYXNoCnN1ZG8gYXB0LWdldCB1cGRhdGUKc3VkbyBhcHQtZ2V0IGluc3RhbGwgLXkgYXBhY2hlMgpzdWRvIHN5c3RlbWN0bCBzdGFydCBhcGFjaGUyCnN1ZG8gc3lzdGVtY3RsIGVuYWJsZSBhcGFjaGUyCmV4cG9ydCBBV1NfQUNDRVNTX0tFWV9JRApleHBvcnQgQVdTX0FDQ0VTU19LRVlfSUQ9QUtJQUlPU0ZPRE5ON0VYQU1QTEUKZXhwb3J0IEFXU19TRUNSRVRfQUNDRVNTX0tFWT13SmFsclhVdG5GRU1JL0s3TURFTkcvYlB4UmZpQ1lFWEFNUExFS0VZCmV4cG9ydCBBV1NfREVGQVVMVF9SRUdJT049dXMtd2VzdC0yCmVjaG8gIjxoMT5EZXBsb3llZCB2aWEgVGVycmFmb3JtPC9oMT4iIHwgc3VkbyB0ZWUgL3Zhci93d3cvaHRtbC9pbmRleC5odG1s
```
**Version (please complete the following information):**
- Checkov Version [e.g. 22]
- >2.0.0
**Additional context**
Fix implemented here by trying to decode the base64 string.
https://github.com/j2clerck/checkov/commit/af9abd724520ec21ec5510dfc5db2ef83fd9e6dc
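The linked commit amounts to attempting a base64 decode before scanning and falling back to the raw string when that fails; a minimal sketch of the idea (the helper name is illustrative):
```python
from base64 import b64decode


def normalize_user_data(user_data):
    # If UserData is already base64-encoded, decode it so the secret
    # scanner sees plain text; otherwise fall back to the string form.
    try:
        return b64decode(user_data).decode()
    except Exception:
        return str(user_data)
```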
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/cloudformation/checks/resource/aws/EC2Credentials.py`
Content:
```
1 from typing import List
2
3 from checkov.common.models.enums import CheckResult, CheckCategories
4 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
5 from checkov.common.util.secrets import get_secrets_from_string
6
7
8 class EC2Credentials(BaseResourceCheck):
9 def __init__(self):
10 name = "Ensure no hard-coded secrets exist in EC2 user data"
11 id = "CKV_AWS_46"
12 supported_resources = ['AWS::EC2::Instance']
13 categories = [CheckCategories.SECRETS]
14 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
15
16 def scan_resource_conf(self, conf):
17 if 'Properties' in conf.keys():
18 if 'UserData' in conf['Properties'].keys():
19 user_data = conf['Properties']['UserData']
20 # Cast to string as user data object can look slightly different depending
21 # on Yaml or JSON CF Templates and how the B64 conversion is done.
22 user_data_str = str(user_data)
23 if isinstance(user_data_str, str):
24 secrets = get_secrets_from_string(str(user_data_str))
25 if secrets:
26 for idx, secret in enumerate(secrets):
27 conf[f'{self.id}_secret_{idx}'] = secret
28 return CheckResult.FAILED
29 return CheckResult.PASSED
30
31 def get_evaluated_keys(self) -> List[str]:
32 return ["Properties/UserData"]
33
34
35 check = EC2Credentials()
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py
--- a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py
+++ b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py
@@ -1,4 +1,5 @@
from typing import List
+from base64 import b64decode
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
@@ -17,9 +18,15 @@
if 'Properties' in conf.keys():
if 'UserData' in conf['Properties'].keys():
user_data = conf['Properties']['UserData']
- # Cast to string as user data object can look slightly different depending
- # on Yaml or JSON CF Templates and how the B64 conversion is done.
- user_data_str = str(user_data)
+ # In some case, the UserData might be a base64 encoded string which will slow down (3 minutes)
+ # the get_secrets_from_string function.
+ try:
+ user_data_str = b64decode(user_data).decode()
+ except Exception:
+ # Cast to string as user data object can look slightly different depending
+ # on Yaml or JSON CF Templates and how the B64 conversion is done.
+ user_data_str = str(user_data)
+
if isinstance(user_data_str, str):
secrets = get_secrets_from_string(str(user_data_str))
if secrets:
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py\n--- a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py\n+++ b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py\n@@ -1,4 +1,5 @@\n from typing import List\n+from base64 import b64decode\n \n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n@@ -17,9 +18,15 @@\n if 'Properties' in conf.keys():\n if 'UserData' in conf['Properties'].keys():\n user_data = conf['Properties']['UserData']\n- # Cast to string as user data object can look slightly different depending\n- # on Yaml or JSON CF Templates and how the B64 conversion is done.\n- user_data_str = str(user_data)\n+ # In some case, the UserData might be a base64 encoded string which will slow down (3 minutes)\n+ # the get_secrets_from_string function.\n+ try:\n+ user_data_str = b64decode(user_data).decode()\n+ except Exception:\n+ # Cast to string as user data object can look slightly different depending\n+ # on Yaml or JSON CF Templates and how the B64 conversion is done.\n+ user_data_str = str(user_data)\n+\n if isinstance(user_data_str, str):\n secrets = get_secrets_from_string(str(user_data_str))\n if secrets:\n", "issue": "CloudFormation EC2Credentials.py does evaluate if the UserData is base64 encoded\n**Describe the issue**\r\nThe check EC2Credentials.py which checks the UserData of a CloudFormation template against secrets does not work if the userdata are already encoded in base64. \r\nThe check is not returning the correct result and the processing time is very slow.\r\n\r\n**Examples**\r\n```yaml\r\nAWSTemplateFormatVersion: \"2010-09-09\"\r\nResources:\r\n Resource0:\r\n Type: AWS::EC2::Instance\r\n Properties:\r\n ImageId: ami-04169656fea786776\r\n UserData: IyEgL2Jpbi9iYXNoCnN1ZG8gYXB0LWdldCB1cGRhdGUKc3VkbyBhcHQtZ2V0IGluc3RhbGwgLXkgYXBhY2hlMgpzdWRvIHN5c3RlbWN0bCBzdGFydCBhcGFjaGUyCnN1ZG8gc3lzdGVtY3RsIGVuYWJsZSBhcGFjaGUyCmV4cG9ydCBBV1NfQUNDRVNTX0tFWV9JRApleHBvcnQgQVdTX0FDQ0VTU19LRVlfSUQ9QUtJQUlPU0ZPRE5ON0VYQU1QTEUKZXhwb3J0IEFXU19TRUNSRVRfQUNDRVNTX0tFWT13SmFsclhVdG5GRU1JL0s3TURFTkcvYlB4UmZpQ1lFWEFNUExFS0VZCmV4cG9ydCBBV1NfREVGQVVMVF9SRUdJT049dXMtd2VzdC0yCmVjaG8gIjxoMT5EZXBsb3llZCB2aWEgVGVycmFmb3JtPC9oMT4iIHwgc3VkbyB0ZWUgL3Zhci93d3cvaHRtbC9pbmRleC5odG1s\r\n\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version [e.g. 
22]\r\n - >2.0.0\r\n\r\n**Additional context**\r\nFix implemented here by trying to decode the base64 string.\r\nhttps://github.com/j2clerck/checkov/commit/af9abd724520ec21ec5510dfc5db2ef83fd9e6dc\r\n\n", "before_files": [{"content": "from typing import List\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.util.secrets import get_secrets_from_string\n\n\nclass EC2Credentials(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure no hard-coded secrets exist in EC2 user data\"\n id = \"CKV_AWS_46\"\n supported_resources = ['AWS::EC2::Instance']\n categories = [CheckCategories.SECRETS]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'Properties' in conf.keys():\n if 'UserData' in conf['Properties'].keys():\n user_data = conf['Properties']['UserData']\n # Cast to string as user data object can look slightly different depending\n # on Yaml or JSON CF Templates and how the B64 conversion is done.\n user_data_str = str(user_data)\n if isinstance(user_data_str, str):\n secrets = get_secrets_from_string(str(user_data_str))\n if secrets:\n for idx, secret in enumerate(secrets):\n conf[f'{self.id}_secret_{idx}'] = secret\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n def get_evaluated_keys(self) -> List[str]:\n return [\"Properties/UserData\"]\n\n\ncheck = EC2Credentials()\n", "path": "checkov/cloudformation/checks/resource/aws/EC2Credentials.py"}], "after_files": [{"content": "from typing import List\nfrom base64 import b64decode\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.util.secrets import get_secrets_from_string\n\n\nclass EC2Credentials(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure no hard-coded secrets exist in EC2 user data\"\n id = \"CKV_AWS_46\"\n supported_resources = ['AWS::EC2::Instance']\n categories = [CheckCategories.SECRETS]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'Properties' in conf.keys():\n if 'UserData' in conf['Properties'].keys():\n user_data = conf['Properties']['UserData']\n # In some case, the UserData might be a base64 encoded string which will slow down (3 minutes)\n # the get_secrets_from_string function.\n try:\n user_data_str = b64decode(user_data).decode()\n except Exception:\n # Cast to string as user data object can look slightly different depending\n # on Yaml or JSON CF Templates and how the B64 conversion is done.\n user_data_str = str(user_data)\n\n if isinstance(user_data_str, str):\n secrets = get_secrets_from_string(str(user_data_str))\n if secrets:\n for idx, secret in enumerate(secrets):\n conf[f'{self.id}_secret_{idx}'] = secret\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n def get_evaluated_keys(self) -> List[str]:\n return [\"Properties/UserData\"]\n\n\ncheck = EC2Credentials()\n", "path": "checkov/cloudformation/checks/resource/aws/EC2Credentials.py"}]} | 1,242 | 342 |
gh_patches_debug_1794 | rasdani/github-patches | git_diff | scikit-image__scikit-image-4053 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handle `normed` to `density` deprecations in `histogram`
## Description
See https://github.com/scikit-image/scikit-image/pull/3427 and the related issues.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/examples/features_detection/plot_local_binary_pattern.py`
Content:
```
1 """
2 ===============================================
3 Local Binary Pattern for texture classification
4 ===============================================
5
6 In this example, we will see how to classify textures based on LBP (Local
7 Binary Pattern). LBP looks at points surrounding a central point and tests
8 whether the surrounding points are greater than or less than the central point
9 (i.e. gives a binary result).
10
11 Before trying out LBP on an image, it helps to look at a schematic of LBPs.
12 The below code is just used to plot the schematic.
13 """
14 import numpy as np
15 import matplotlib.pyplot as plt
16
17
18 METHOD = 'uniform'
19 plt.rcParams['font.size'] = 9
20
21
22 def plot_circle(ax, center, radius, color):
23 circle = plt.Circle(center, radius, facecolor=color, edgecolor='0.5')
24 ax.add_patch(circle)
25
26
27 def plot_lbp_model(ax, binary_values):
28 """Draw the schematic for a local binary pattern."""
29 # Geometry spec
30 theta = np.deg2rad(45)
31 R = 1
32 r = 0.15
33 w = 1.5
34 gray = '0.5'
35
36 # Draw the central pixel.
37 plot_circle(ax, (0, 0), radius=r, color=gray)
38 # Draw the surrounding pixels.
39 for i, facecolor in enumerate(binary_values):
40 x = R * np.cos(i * theta)
41 y = R * np.sin(i * theta)
42 plot_circle(ax, (x, y), radius=r, color=str(facecolor))
43
44 # Draw the pixel grid.
45 for x in np.linspace(-w, w, 4):
46 ax.axvline(x, color=gray)
47 ax.axhline(x, color=gray)
48
49 # Tweak the layout.
50 ax.axis('image')
51 ax.axis('off')
52 size = w + 0.2
53 ax.set_xlim(-size, size)
54 ax.set_ylim(-size, size)
55
56
57 fig, axes = plt.subplots(ncols=5, figsize=(7, 2))
58
59 titles = ['flat', 'flat', 'edge', 'corner', 'non-uniform']
60
61 binary_patterns = [np.zeros(8),
62 np.ones(8),
63 np.hstack([np.ones(4), np.zeros(4)]),
64 np.hstack([np.zeros(3), np.ones(5)]),
65 [1, 0, 0, 1, 1, 1, 0, 0]]
66
67 for ax, values, name in zip(axes, binary_patterns, titles):
68 plot_lbp_model(ax, values)
69 ax.set_title(name)
70
71 ######################################################################
72 # The figure above shows example results with black (or white) representing
73 # pixels that are less (or more) intense than the central pixel. When
74 # surrounding pixels are all black or all white, then that image region is
75 # flat (i.e. featureless). Groups of continuous black or white pixels are
76 # considered "uniform" patterns that can be interpreted as corners or edges.
77 # If pixels switch back-and-forth between black and white pixels, the pattern
78 # is considered "non-uniform".
79 #
80 # When using LBP to detect texture, you measure a collection of LBPs over an
81 # image patch and look at the distribution of these LBPs. Lets apply LBP to a
82 # brick texture.
83
84 from skimage.transform import rotate
85 from skimage.feature import local_binary_pattern
86 from skimage import data
87 from skimage.color import label2rgb
88
89 # settings for LBP
90 radius = 3
91 n_points = 8 * radius
92
93
94 def overlay_labels(image, lbp, labels):
95 mask = np.logical_or.reduce([lbp == each for each in labels])
96 return label2rgb(mask, image=image, bg_label=0, alpha=0.5)
97
98
99 def highlight_bars(bars, indexes):
100 for i in indexes:
101 bars[i].set_facecolor('r')
102
103
104 image = data.brick()
105 lbp = local_binary_pattern(image, n_points, radius, METHOD)
106
107
108 def hist(ax, lbp):
109 n_bins = int(lbp.max() + 1)
110 return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),
111 facecolor='0.5')
112
113
114 # plot histograms of LBP of textures
115 fig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))
116 plt.gray()
117
118 titles = ('edge', 'flat', 'corner')
119 w = width = radius - 1
120 edge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)
121 flat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))
122 i_14 = n_points // 4 # 1/4th of the histogram
123 i_34 = 3 * (n_points // 4) # 3/4th of the histogram
124 corner_labels = (list(range(i_14 - w, i_14 + w + 1)) +
125 list(range(i_34 - w, i_34 + w + 1)))
126
127 label_sets = (edge_labels, flat_labels, corner_labels)
128
129 for ax, labels in zip(ax_img, label_sets):
130 ax.imshow(overlay_labels(image, lbp, labels))
131
132 for ax, labels, name in zip(ax_hist, label_sets, titles):
133 counts, _, bars = hist(ax, lbp)
134 highlight_bars(bars, labels)
135 ax.set_ylim(top=np.max(counts[:-1]))
136 ax.set_xlim(right=n_points + 2)
137 ax.set_title(name)
138
139 ax_hist[0].set_ylabel('Percentage')
140 for ax in ax_img:
141 ax.axis('off')
142
143
144 ######################################################################
145 # The above plot highlights flat, edge-like, and corner-like regions of the
146 # image.
147 #
148 # The histogram of the LBP result is a good measure to classify textures.
149 # Here, we test the histogram distributions against each other using the
150 # Kullback-Leibler-Divergence.
151
152 # settings for LBP
153 radius = 2
154 n_points = 8 * radius
155
156
157 def kullback_leibler_divergence(p, q):
158 p = np.asarray(p)
159 q = np.asarray(q)
160 filt = np.logical_and(p != 0, q != 0)
161 return np.sum(p[filt] * np.log2(p[filt] / q[filt]))
162
163
164 def match(refs, img):
165 best_score = 10
166 best_name = None
167 lbp = local_binary_pattern(img, n_points, radius, METHOD)
168 n_bins = int(lbp.max() + 1)
169 hist, _ = np.histogram(lbp, density=True, bins=n_bins, range=(0, n_bins))
170 for name, ref in refs.items():
171 ref_hist, _ = np.histogram(ref, density=True, bins=n_bins,
172 range=(0, n_bins))
173 score = kullback_leibler_divergence(hist, ref_hist)
174 if score < best_score:
175 best_score = score
176 best_name = name
177 return best_name
178
179
180 brick = data.brick()
181 grass = data.grass()
182 gravel = data.gravel()
183
184 refs = {
185 'brick': local_binary_pattern(brick, n_points, radius, METHOD),
186 'grass': local_binary_pattern(grass, n_points, radius, METHOD),
187 'gravel': local_binary_pattern(gravel, n_points, radius, METHOD)
188 }
189
190 # classify rotated textures
191 print('Rotated images matched against references using LBP:')
192 print('original: brick, rotated: 30deg, match result: ',
193 match(refs, rotate(brick, angle=30, resize=False)))
194 print('original: brick, rotated: 70deg, match result: ',
195 match(refs, rotate(brick, angle=70, resize=False)))
196 print('original: grass, rotated: 145deg, match result: ',
197 match(refs, rotate(grass, angle=145, resize=False)))
198
199 # plot histograms of LBP of textures
200 fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3,
201 figsize=(9, 6))
202 plt.gray()
203
204 ax1.imshow(brick)
205 ax1.axis('off')
206 hist(ax4, refs['brick'])
207 ax4.set_ylabel('Percentage')
208
209 ax2.imshow(grass)
210 ax2.axis('off')
211 hist(ax5, refs['grass'])
212 ax5.set_xlabel('Uniform LBP values')
213
214 ax3.imshow(gravel)
215 ax3.axis('off')
216 hist(ax6, refs['gravel'])
217
218 plt.show()
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/examples/features_detection/plot_local_binary_pattern.py b/doc/examples/features_detection/plot_local_binary_pattern.py
--- a/doc/examples/features_detection/plot_local_binary_pattern.py
+++ b/doc/examples/features_detection/plot_local_binary_pattern.py
@@ -107,7 +107,7 @@
def hist(ax, lbp):
n_bins = int(lbp.max() + 1)
- return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),
+ return ax.hist(lbp.ravel(), density=True, bins=n_bins, range=(0, n_bins),
facecolor='0.5')
| {"golden_diff": "diff --git a/doc/examples/features_detection/plot_local_binary_pattern.py b/doc/examples/features_detection/plot_local_binary_pattern.py\n--- a/doc/examples/features_detection/plot_local_binary_pattern.py\n+++ b/doc/examples/features_detection/plot_local_binary_pattern.py\n@@ -107,7 +107,7 @@\n \n def hist(ax, lbp):\n n_bins = int(lbp.max() + 1)\n- return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),\n+ return ax.hist(lbp.ravel(), density=True, bins=n_bins, range=(0, n_bins),\n facecolor='0.5')\n", "issue": "Handle `normed` to `density` deprecations in `histogram` \n## Description\r\nSee https://github.com/scikit-image/scikit-image/pull/3427 and the related issues.\r\n\n", "before_files": [{"content": "\"\"\"\n===============================================\nLocal Binary Pattern for texture classification\n===============================================\n\nIn this example, we will see how to classify textures based on LBP (Local\nBinary Pattern). LBP looks at points surrounding a central point and tests\nwhether the surrounding points are greater than or less than the central point\n(i.e. gives a binary result).\n\nBefore trying out LBP on an image, it helps to look at a schematic of LBPs.\nThe below code is just used to plot the schematic.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nMETHOD = 'uniform'\nplt.rcParams['font.size'] = 9\n\n\ndef plot_circle(ax, center, radius, color):\n circle = plt.Circle(center, radius, facecolor=color, edgecolor='0.5')\n ax.add_patch(circle)\n\n\ndef plot_lbp_model(ax, binary_values):\n \"\"\"Draw the schematic for a local binary pattern.\"\"\"\n # Geometry spec\n theta = np.deg2rad(45)\n R = 1\n r = 0.15\n w = 1.5\n gray = '0.5'\n\n # Draw the central pixel.\n plot_circle(ax, (0, 0), radius=r, color=gray)\n # Draw the surrounding pixels.\n for i, facecolor in enumerate(binary_values):\n x = R * np.cos(i * theta)\n y = R * np.sin(i * theta)\n plot_circle(ax, (x, y), radius=r, color=str(facecolor))\n\n # Draw the pixel grid.\n for x in np.linspace(-w, w, 4):\n ax.axvline(x, color=gray)\n ax.axhline(x, color=gray)\n\n # Tweak the layout.\n ax.axis('image')\n ax.axis('off')\n size = w + 0.2\n ax.set_xlim(-size, size)\n ax.set_ylim(-size, size)\n\n\nfig, axes = plt.subplots(ncols=5, figsize=(7, 2))\n\ntitles = ['flat', 'flat', 'edge', 'corner', 'non-uniform']\n\nbinary_patterns = [np.zeros(8),\n np.ones(8),\n np.hstack([np.ones(4), np.zeros(4)]),\n np.hstack([np.zeros(3), np.ones(5)]),\n [1, 0, 0, 1, 1, 1, 0, 0]]\n\nfor ax, values, name in zip(axes, binary_patterns, titles):\n plot_lbp_model(ax, values)\n ax.set_title(name)\n\n######################################################################\n# The figure above shows example results with black (or white) representing\n# pixels that are less (or more) intense than the central pixel. When\n# surrounding pixels are all black or all white, then that image region is\n# flat (i.e. featureless). Groups of continuous black or white pixels are\n# considered \"uniform\" patterns that can be interpreted as corners or edges.\n# If pixels switch back-and-forth between black and white pixels, the pattern\n# is considered \"non-uniform\".\n#\n# When using LBP to detect texture, you measure a collection of LBPs over an\n# image patch and look at the distribution of these LBPs. 
Lets apply LBP to a\n# brick texture.\n\nfrom skimage.transform import rotate\nfrom skimage.feature import local_binary_pattern\nfrom skimage import data\nfrom skimage.color import label2rgb\n\n# settings for LBP\nradius = 3\nn_points = 8 * radius\n\n\ndef overlay_labels(image, lbp, labels):\n mask = np.logical_or.reduce([lbp == each for each in labels])\n return label2rgb(mask, image=image, bg_label=0, alpha=0.5)\n\n\ndef highlight_bars(bars, indexes):\n for i in indexes:\n bars[i].set_facecolor('r')\n\n\nimage = data.brick()\nlbp = local_binary_pattern(image, n_points, radius, METHOD)\n\n\ndef hist(ax, lbp):\n n_bins = int(lbp.max() + 1)\n return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),\n facecolor='0.5')\n\n\n# plot histograms of LBP of textures\nfig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))\nplt.gray()\n\ntitles = ('edge', 'flat', 'corner')\nw = width = radius - 1\nedge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)\nflat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))\ni_14 = n_points // 4 # 1/4th of the histogram\ni_34 = 3 * (n_points // 4) # 3/4th of the histogram\ncorner_labels = (list(range(i_14 - w, i_14 + w + 1)) +\n list(range(i_34 - w, i_34 + w + 1)))\n\nlabel_sets = (edge_labels, flat_labels, corner_labels)\n\nfor ax, labels in zip(ax_img, label_sets):\n ax.imshow(overlay_labels(image, lbp, labels))\n\nfor ax, labels, name in zip(ax_hist, label_sets, titles):\n counts, _, bars = hist(ax, lbp)\n highlight_bars(bars, labels)\n ax.set_ylim(top=np.max(counts[:-1]))\n ax.set_xlim(right=n_points + 2)\n ax.set_title(name)\n\nax_hist[0].set_ylabel('Percentage')\nfor ax in ax_img:\n ax.axis('off')\n\n\n######################################################################\n# The above plot highlights flat, edge-like, and corner-like regions of the\n# image.\n#\n# The histogram of the LBP result is a good measure to classify textures.\n# Here, we test the histogram distributions against each other using the\n# Kullback-Leibler-Divergence.\n\n# settings for LBP\nradius = 2\nn_points = 8 * radius\n\n\ndef kullback_leibler_divergence(p, q):\n p = np.asarray(p)\n q = np.asarray(q)\n filt = np.logical_and(p != 0, q != 0)\n return np.sum(p[filt] * np.log2(p[filt] / q[filt]))\n\n\ndef match(refs, img):\n best_score = 10\n best_name = None\n lbp = local_binary_pattern(img, n_points, radius, METHOD)\n n_bins = int(lbp.max() + 1)\n hist, _ = np.histogram(lbp, density=True, bins=n_bins, range=(0, n_bins))\n for name, ref in refs.items():\n ref_hist, _ = np.histogram(ref, density=True, bins=n_bins,\n range=(0, n_bins))\n score = kullback_leibler_divergence(hist, ref_hist)\n if score < best_score:\n best_score = score\n best_name = name\n return best_name\n\n\nbrick = data.brick()\ngrass = data.grass()\ngravel = data.gravel()\n\nrefs = {\n 'brick': local_binary_pattern(brick, n_points, radius, METHOD),\n 'grass': local_binary_pattern(grass, n_points, radius, METHOD),\n 'gravel': local_binary_pattern(gravel, n_points, radius, METHOD)\n}\n\n# classify rotated textures\nprint('Rotated images matched against references using LBP:')\nprint('original: brick, rotated: 30deg, match result: ',\n match(refs, rotate(brick, angle=30, resize=False)))\nprint('original: brick, rotated: 70deg, match result: ',\n match(refs, rotate(brick, angle=70, resize=False)))\nprint('original: grass, rotated: 145deg, match result: ',\n match(refs, rotate(grass, angle=145, resize=False)))\n\n# plot histograms of LBP of textures\nfig, ((ax1, 
ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3,\n figsize=(9, 6))\nplt.gray()\n\nax1.imshow(brick)\nax1.axis('off')\nhist(ax4, refs['brick'])\nax4.set_ylabel('Percentage')\n\nax2.imshow(grass)\nax2.axis('off')\nhist(ax5, refs['grass'])\nax5.set_xlabel('Uniform LBP values')\n\nax3.imshow(gravel)\nax3.axis('off')\nhist(ax6, refs['gravel'])\n\nplt.show()\n", "path": "doc/examples/features_detection/plot_local_binary_pattern.py"}], "after_files": [{"content": "\"\"\"\n===============================================\nLocal Binary Pattern for texture classification\n===============================================\n\nIn this example, we will see how to classify textures based on LBP (Local\nBinary Pattern). LBP looks at points surrounding a central point and tests\nwhether the surrounding points are greater than or less than the central point\n(i.e. gives a binary result).\n\nBefore trying out LBP on an image, it helps to look at a schematic of LBPs.\nThe below code is just used to plot the schematic.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nMETHOD = 'uniform'\nplt.rcParams['font.size'] = 9\n\n\ndef plot_circle(ax, center, radius, color):\n circle = plt.Circle(center, radius, facecolor=color, edgecolor='0.5')\n ax.add_patch(circle)\n\n\ndef plot_lbp_model(ax, binary_values):\n \"\"\"Draw the schematic for a local binary pattern.\"\"\"\n # Geometry spec\n theta = np.deg2rad(45)\n R = 1\n r = 0.15\n w = 1.5\n gray = '0.5'\n\n # Draw the central pixel.\n plot_circle(ax, (0, 0), radius=r, color=gray)\n # Draw the surrounding pixels.\n for i, facecolor in enumerate(binary_values):\n x = R * np.cos(i * theta)\n y = R * np.sin(i * theta)\n plot_circle(ax, (x, y), radius=r, color=str(facecolor))\n\n # Draw the pixel grid.\n for x in np.linspace(-w, w, 4):\n ax.axvline(x, color=gray)\n ax.axhline(x, color=gray)\n\n # Tweak the layout.\n ax.axis('image')\n ax.axis('off')\n size = w + 0.2\n ax.set_xlim(-size, size)\n ax.set_ylim(-size, size)\n\n\nfig, axes = plt.subplots(ncols=5, figsize=(7, 2))\n\ntitles = ['flat', 'flat', 'edge', 'corner', 'non-uniform']\n\nbinary_patterns = [np.zeros(8),\n np.ones(8),\n np.hstack([np.ones(4), np.zeros(4)]),\n np.hstack([np.zeros(3), np.ones(5)]),\n [1, 0, 0, 1, 1, 1, 0, 0]]\n\nfor ax, values, name in zip(axes, binary_patterns, titles):\n plot_lbp_model(ax, values)\n ax.set_title(name)\n\n######################################################################\n# The figure above shows example results with black (or white) representing\n# pixels that are less (or more) intense than the central pixel. When\n# surrounding pixels are all black or all white, then that image region is\n# flat (i.e. featureless). Groups of continuous black or white pixels are\n# considered \"uniform\" patterns that can be interpreted as corners or edges.\n# If pixels switch back-and-forth between black and white pixels, the pattern\n# is considered \"non-uniform\".\n#\n# When using LBP to detect texture, you measure a collection of LBPs over an\n# image patch and look at the distribution of these LBPs. 
Lets apply LBP to a\n# brick texture.\n\nfrom skimage.transform import rotate\nfrom skimage.feature import local_binary_pattern\nfrom skimage import data\nfrom skimage.color import label2rgb\n\n# settings for LBP\nradius = 3\nn_points = 8 * radius\n\n\ndef overlay_labels(image, lbp, labels):\n mask = np.logical_or.reduce([lbp == each for each in labels])\n return label2rgb(mask, image=image, bg_label=0, alpha=0.5)\n\n\ndef highlight_bars(bars, indexes):\n for i in indexes:\n bars[i].set_facecolor('r')\n\n\nimage = data.brick()\nlbp = local_binary_pattern(image, n_points, radius, METHOD)\n\n\ndef hist(ax, lbp):\n n_bins = int(lbp.max() + 1)\n return ax.hist(lbp.ravel(), density=True, bins=n_bins, range=(0, n_bins),\n facecolor='0.5')\n\n\n# plot histograms of LBP of textures\nfig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))\nplt.gray()\n\ntitles = ('edge', 'flat', 'corner')\nw = width = radius - 1\nedge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)\nflat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))\ni_14 = n_points // 4 # 1/4th of the histogram\ni_34 = 3 * (n_points // 4) # 3/4th of the histogram\ncorner_labels = (list(range(i_14 - w, i_14 + w + 1)) +\n list(range(i_34 - w, i_34 + w + 1)))\n\nlabel_sets = (edge_labels, flat_labels, corner_labels)\n\nfor ax, labels in zip(ax_img, label_sets):\n ax.imshow(overlay_labels(image, lbp, labels))\n\nfor ax, labels, name in zip(ax_hist, label_sets, titles):\n counts, _, bars = hist(ax, lbp)\n highlight_bars(bars, labels)\n ax.set_ylim(top=np.max(counts[:-1]))\n ax.set_xlim(right=n_points + 2)\n ax.set_title(name)\n\nax_hist[0].set_ylabel('Percentage')\nfor ax in ax_img:\n ax.axis('off')\n\n\n######################################################################\n# The above plot highlights flat, edge-like, and corner-like regions of the\n# image.\n#\n# The histogram of the LBP result is a good measure to classify textures.\n# Here, we test the histogram distributions against each other using the\n# Kullback-Leibler-Divergence.\n\n# settings for LBP\nradius = 2\nn_points = 8 * radius\n\n\ndef kullback_leibler_divergence(p, q):\n p = np.asarray(p)\n q = np.asarray(q)\n filt = np.logical_and(p != 0, q != 0)\n return np.sum(p[filt] * np.log2(p[filt] / q[filt]))\n\n\ndef match(refs, img):\n best_score = 10\n best_name = None\n lbp = local_binary_pattern(img, n_points, radius, METHOD)\n n_bins = int(lbp.max() + 1)\n hist, _ = np.histogram(lbp, density=True, bins=n_bins, range=(0, n_bins))\n for name, ref in refs.items():\n ref_hist, _ = np.histogram(ref, density=True, bins=n_bins,\n range=(0, n_bins))\n score = kullback_leibler_divergence(hist, ref_hist)\n if score < best_score:\n best_score = score\n best_name = name\n return best_name\n\n\nbrick = data.brick()\ngrass = data.grass()\ngravel = data.gravel()\n\nrefs = {\n 'brick': local_binary_pattern(brick, n_points, radius, METHOD),\n 'grass': local_binary_pattern(grass, n_points, radius, METHOD),\n 'gravel': local_binary_pattern(gravel, n_points, radius, METHOD)\n}\n\n# classify rotated textures\nprint('Rotated images matched against references using LBP:')\nprint('original: brick, rotated: 30deg, match result: ',\n match(refs, rotate(brick, angle=30, resize=False)))\nprint('original: brick, rotated: 70deg, match result: ',\n match(refs, rotate(brick, angle=70, resize=False)))\nprint('original: grass, rotated: 145deg, match result: ',\n match(refs, rotate(grass, angle=145, resize=False)))\n\n# plot histograms of LBP of textures\nfig, ((ax1, 
ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3,\n figsize=(9, 6))\nplt.gray()\n\nax1.imshow(brick)\nax1.axis('off')\nhist(ax4, refs['brick'])\nax4.set_ylabel('Percentage')\n\nax2.imshow(grass)\nax2.axis('off')\nhist(ax5, refs['grass'])\nax5.set_xlabel('Uniform LBP values')\n\nax3.imshow(gravel)\nax3.axis('off')\nhist(ax6, refs['gravel'])\n\nplt.show()\n", "path": "doc/examples/features_detection/plot_local_binary_pattern.py"}]} | 2,746 | 142 |
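The scikit-image fix above is essentially a one-word rename: matplotlib's `ax.hist` and NumPy's `np.histogram` both deprecated `normed` in favour of `density`, which normalises the histogram so it integrates to one. The following standalone sketch shows the renamed call on synthetic data rather than the LBP example; it assumes matplotlib >= 2.1 and NumPy >= 1.17 (for `default_rng`).

```python
import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt

values = np.random.default_rng(0).integers(0, 16, size=1000)
n_bins = 16

fig, ax = plt.subplots()
# old, removed API: ax.hist(values, normed=True, bins=n_bins, range=(0, n_bins))
ax.hist(values, density=True, bins=n_bins, range=(0, n_bins), facecolor="0.5")

# the same rename applies on the NumPy side:
hist, _ = np.histogram(values, density=True, bins=n_bins, range=(0, n_bins))
assert np.isclose(hist.sum(), 1.0)  # bin width is 1, so the densities sum to ~1
```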
gh_patches_debug_17541 | rasdani/github-patches | git_diff | liqd__a4-opin-284 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filter draft projects from all listings
Projects that are in draft mode should be removed from most listings:
- [x] latest projects for all users
- [x] other projects for all users
- [x] organisation page for non-initiators
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/organisations/views.py`
Content:
```
1 from django.views import generic
2
3 from . import models
4
5
6 class OrganisationDetailView(generic.DetailView):
7 model = models.Organisation
8
9
10 class OrganisationListView(generic.ListView):
11 model = models.Organisation
12 paginate_by = 10
13
```
Path: `euth/projects/models.py`
Content:
```
1 from django.conf import settings
2 from django.db import models
3 from django.utils import functional, timezone
4
5 from contrib.transforms import html_transforms
6 from euth.contrib import base_models, validators
7 from euth.organisations import models as org_models
8
9
10 class ProjectManager(models.Manager):
11
12 def get_by_natural_key(self, name):
13 return self.get(name=name)
14
15 def featured(self):
16 return self.filter(is_draft=False).order_by('-created')[:8]
17
18
19 class Project(base_models.TimeStampedModel):
20 slug = models.SlugField(max_length=512, unique=True)
21 name = models.CharField(max_length=512)
22 organisation = models.ForeignKey(
23 org_models.Organisation, on_delete=models.CASCADE)
24 description = models.CharField(max_length=1024)
25 information = models.TextField()
26 is_public = models.BooleanField(default=True)
27 is_draft = models.BooleanField(default=True)
28 image = models.ImageField(
29 upload_to='projects/backgrounds',
30 blank=True,
31 validators=[validators.validate_hero_image])
32 participants = models.ManyToManyField(
33 settings.AUTH_USER_MODEL,
34 related_name='project_participant',
35 blank=True,
36 )
37 moderators = models.ManyToManyField(
38 settings.AUTH_USER_MODEL,
39 related_name='project_moderator'
40 )
41
42 objects = ProjectManager()
43
44 def __str__(self):
45 return self.name
46
47 def save(self, *args, **kwargs):
48 self.information = html_transforms.clean_html_field(
49 self.information)
50 super(Project, self).save(*args, **kwargs)
51
52 def get_absolute_url(self):
53 from django.core.urlresolvers import reverse
54 return reverse('project-detail', args=[str(self.slug)])
55
56 def has_member(self, user):
57 """
58 Everybody is member of all public projects and private projects can
59 be joined as moderator or participant.
60 """
61 return (
62 (user.is_authenticated() and self.is_public)
63 or (user in self.participants.all())
64 or (user in self.moderators.all())
65 )
66
67 @functional.cached_property
68 def other_projects(self):
69 other_projects = self.organisation.project_set.all().exclude(
70 slug=self.slug)
71 return other_projects
72
73 @functional.cached_property
74 def is_private(self):
75 return not self.is_public
76
77 @functional.cached_property
78 def active_phase(self):
79 from euth.phases import models as phase_models
80 return phase_models.Phase.objects\
81 .filter(module__project=self)\
82 .active_phases()\
83 .first()
84
85 @property
86 def days_left(self):
87 if self.active_phase:
88 today = timezone.now().replace(hour=0, minute=0, second=0)
89 time_delta = self.active_phase.end_date - today
90 return time_delta.days
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/organisations/views.py b/euth/organisations/views.py
--- a/euth/organisations/views.py
+++ b/euth/organisations/views.py
@@ -6,6 +6,12 @@
class OrganisationDetailView(generic.DetailView):
model = models.Organisation
+ def visible_projects(self):
+ if self.request.user in self.object.initiators.all():
+ return self.object.project_set.all()
+ else:
+ return self.object.project_set.filter(is_draft=False)
+
class OrganisationListView(generic.ListView):
model = models.Organisation
diff --git a/euth/projects/models.py b/euth/projects/models.py
--- a/euth/projects/models.py
+++ b/euth/projects/models.py
@@ -66,8 +66,8 @@
@functional.cached_property
def other_projects(self):
- other_projects = self.organisation.project_set.all().exclude(
- slug=self.slug)
+ other_projects = self.organisation.project_set\
+ .filter(is_draft=False).exclude(slug=self.slug)
return other_projects
@functional.cached_property
| {"golden_diff": "diff --git a/euth/organisations/views.py b/euth/organisations/views.py\n--- a/euth/organisations/views.py\n+++ b/euth/organisations/views.py\n@@ -6,6 +6,12 @@\n class OrganisationDetailView(generic.DetailView):\n model = models.Organisation\n \n+ def visible_projects(self):\n+ if self.request.user in self.object.initiators.all():\n+ return self.object.project_set.all()\n+ else:\n+ return self.object.project_set.filter(is_draft=False)\n+\n \n class OrganisationListView(generic.ListView):\n model = models.Organisation\ndiff --git a/euth/projects/models.py b/euth/projects/models.py\n--- a/euth/projects/models.py\n+++ b/euth/projects/models.py\n@@ -66,8 +66,8 @@\n \n @functional.cached_property\n def other_projects(self):\n- other_projects = self.organisation.project_set.all().exclude(\n- slug=self.slug)\n+ other_projects = self.organisation.project_set\\\n+ .filter(is_draft=False).exclude(slug=self.slug)\n return other_projects\n \n @functional.cached_property\n", "issue": "Filter draft projects from all listings\nProjects that are in draft mode should be removed from most listings:\n- [x] latest projects for all users\n- [x] other projects for all users\n- [x] organisation page for non-initiators\n\n", "before_files": [{"content": "from django.views import generic\n\nfrom . import models\n\n\nclass OrganisationDetailView(generic.DetailView):\n model = models.Organisation\n\n\nclass OrganisationListView(generic.ListView):\n model = models.Organisation\n paginate_by = 10\n", "path": "euth/organisations/views.py"}, {"content": "from django.conf import settings\nfrom django.db import models\nfrom django.utils import functional, timezone\n\nfrom contrib.transforms import html_transforms\nfrom euth.contrib import base_models, validators\nfrom euth.organisations import models as org_models\n\n\nclass ProjectManager(models.Manager):\n\n def get_by_natural_key(self, name):\n return self.get(name=name)\n\n def featured(self):\n return self.filter(is_draft=False).order_by('-created')[:8]\n\n\nclass Project(base_models.TimeStampedModel):\n slug = models.SlugField(max_length=512, unique=True)\n name = models.CharField(max_length=512)\n organisation = models.ForeignKey(\n org_models.Organisation, on_delete=models.CASCADE)\n description = models.CharField(max_length=1024)\n information = models.TextField()\n is_public = models.BooleanField(default=True)\n is_draft = models.BooleanField(default=True)\n image = models.ImageField(\n upload_to='projects/backgrounds',\n blank=True,\n validators=[validators.validate_hero_image])\n participants = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='project_participant',\n blank=True,\n )\n moderators = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='project_moderator'\n )\n\n objects = ProjectManager()\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.information = html_transforms.clean_html_field(\n self.information)\n super(Project, self).save(*args, **kwargs)\n\n def get_absolute_url(self):\n from django.core.urlresolvers import reverse\n return reverse('project-detail', args=[str(self.slug)])\n\n def has_member(self, user):\n \"\"\"\n Everybody is member of all public projects and private projects can\n be joined as moderator or participant.\n \"\"\"\n return (\n (user.is_authenticated() and self.is_public)\n or (user in self.participants.all())\n or (user in self.moderators.all())\n )\n\n @functional.cached_property\n def other_projects(self):\n other_projects = 
self.organisation.project_set.all().exclude(\n slug=self.slug)\n return other_projects\n\n @functional.cached_property\n def is_private(self):\n return not self.is_public\n\n @functional.cached_property\n def active_phase(self):\n from euth.phases import models as phase_models\n return phase_models.Phase.objects\\\n .filter(module__project=self)\\\n .active_phases()\\\n .first()\n\n @property\n def days_left(self):\n if self.active_phase:\n today = timezone.now().replace(hour=0, minute=0, second=0)\n time_delta = self.active_phase.end_date - today\n return time_delta.days\n", "path": "euth/projects/models.py"}], "after_files": [{"content": "from django.views import generic\n\nfrom . import models\n\n\nclass OrganisationDetailView(generic.DetailView):\n model = models.Organisation\n\n def visible_projects(self):\n if self.request.user in self.object.initiators.all():\n return self.object.project_set.all()\n else:\n return self.object.project_set.filter(is_draft=False)\n\n\nclass OrganisationListView(generic.ListView):\n model = models.Organisation\n paginate_by = 10\n", "path": "euth/organisations/views.py"}, {"content": "from django.conf import settings\nfrom django.db import models\nfrom django.utils import functional, timezone\n\nfrom contrib.transforms import html_transforms\nfrom euth.contrib import base_models, validators\nfrom euth.organisations import models as org_models\n\n\nclass ProjectManager(models.Manager):\n\n def get_by_natural_key(self, name):\n return self.get(name=name)\n\n def featured(self):\n return self.filter(is_draft=False).order_by('-created')[:8]\n\n\nclass Project(base_models.TimeStampedModel):\n slug = models.SlugField(max_length=512, unique=True)\n name = models.CharField(max_length=512)\n organisation = models.ForeignKey(\n org_models.Organisation, on_delete=models.CASCADE)\n description = models.CharField(max_length=1024)\n information = models.TextField()\n is_public = models.BooleanField(default=True)\n is_draft = models.BooleanField(default=True)\n image = models.ImageField(\n upload_to='projects/backgrounds',\n blank=True,\n validators=[validators.validate_hero_image])\n participants = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='project_participant',\n blank=True,\n )\n moderators = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='project_moderator'\n )\n\n objects = ProjectManager()\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.information = html_transforms.clean_html_field(\n self.information)\n super(Project, self).save(*args, **kwargs)\n\n def get_absolute_url(self):\n from django.core.urlresolvers import reverse\n return reverse('project-detail', args=[str(self.slug)])\n\n def has_member(self, user):\n \"\"\"\n Everybody is member of all public projects and private projects can\n be joined as moderator or participant.\n \"\"\"\n return (\n (user.is_authenticated() and self.is_public)\n or (user in self.participants.all())\n or (user in self.moderators.all())\n )\n\n @functional.cached_property\n def other_projects(self):\n other_projects = self.organisation.project_set\\\n .filter(is_draft=False).exclude(slug=self.slug)\n return other_projects\n\n @functional.cached_property\n def is_private(self):\n return not self.is_public\n\n @functional.cached_property\n def active_phase(self):\n from euth.phases import models as phase_models\n return phase_models.Phase.objects\\\n .filter(module__project=self)\\\n .active_phases()\\\n .first()\n\n @property\n def 
days_left(self):\n if self.active_phase:\n today = timezone.now().replace(hour=0, minute=0, second=0)\n time_delta = self.active_phase.end_date - today\n return time_delta.days\n", "path": "euth/projects/models.py"}]} | 1,167 | 243 |
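The a4-opin patch above applies one visibility rule in two places: draft projects are filtered out with `is_draft=False` unless the viewer is one of the organisation's initiators. The sketch below exercises that rule with pure-Python stand-ins (`FakeQuerySet`, `Project`, `Organisation` are illustrative, not the real Django models); the actual patch performs the same filtering on Django querysets.

```python
class FakeQuerySet(list):
    """Tiny stand-in for a Django queryset, just enough for this rule."""
    def all(self):
        return self
    def filter(self, **kwargs):
        return FakeQuerySet(p for p in self
                            if all(getattr(p, k) == v for k, v in kwargs.items()))

class Project:
    def __init__(self, name, is_draft):
        self.name, self.is_draft = name, is_draft

class Organisation:
    def __init__(self, projects, initiators):
        self.project_set = FakeQuerySet(projects)
        self.initiators = FakeQuerySet(initiators)

def visible_projects(organisation, user):
    # Initiators still see their drafts; everyone else only sees published projects.
    if user in organisation.initiators.all():
        return organisation.project_set.all()
    return organisation.project_set.filter(is_draft=False)

org = Organisation([Project("live", False), Project("draft", True)], initiators=["alice"])
assert [p.name for p in visible_projects(org, "bob")] == ["live"]
assert [p.name for p in visible_projects(org, "alice")] == ["live", "draft"]
```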
gh_patches_debug_28597 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1354 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NH: legislators scrape is failing
State: NH
Legislator scraper needs some attention
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/nh/legislators.py`
Content:
```
1 import re
2
3 from billy.scrape.legislators import LegislatorScraper, Legislator
4 from openstates.utils import LXMLMixin
5
6
7 class NHLegislatorScraper(LegislatorScraper, LXMLMixin):
8 jurisdiction = 'nh'
9 latest_only = True
10 members_url = 'http://www.gencourt.state.nh.us/downloads/Members.txt'
11
12 chamber_map = {'H': 'lower', 'S': 'upper'}
13 party_map = {
14 'D': 'Democratic',
15 'R': 'Republican',
16 'I': 'Independent',
17 'L': 'Libertarian',
18 }
19
20 def _get_photo(self, url, chamber):
21 """Attempts to find a portrait in the given legislator profile."""
22 doc = self.lxmlize(url)
23
24 if chamber == 'upper':
25 src = doc.xpath('//div[@id="page_content"]//img[contains(@src, '
26 '"images/senators") or contains(@src, "Senator")]/@src')
27 elif chamber == 'lower':
28 src = doc.xpath('//img[contains(@src, "images/memberpics")]/@src')
29
30 if src and 'nophoto' not in src[0]:
31 photo_url = src[0]
32 else:
33 photo_url = ''
34
35 return photo_url
36
37 def _parse_legislator(self, row, chamber, term):
38 # Capture legislator vitals.
39 first_name = row['FirstName']
40 middle_name = row['MiddleName']
41 last_name = row['lastname']
42 full_name = '{} {} {}'.format(first_name, middle_name, last_name)
43 full_name = re.sub(r'[\s]{2,}', ' ', full_name)
44
45 district = '{} {}'.format(row['county'], int(row['District'])).strip()
46 party = self.party_map[row['party']]
47 email = row['EMailAddress1']
48
49 legislator = Legislator(term, chamber, district, full_name,
50 first_name=first_name, last_name=last_name,
51 middle_name=middle_name, party=party,
52 email=email)
53
54 # Capture legislator office contact information.
55 district_address = '{}\n{}\n{}, {} {}'.format(row['street'],
56 row['address2'], row['city'], row['state'], row['zipcode']).strip()
57
58 legislator.add_office('district', 'Home Address',
59 address=district_address)
60
61 # Retrieve legislator portrait.
62 #profile_url = None
63 #if chamber == 'upper':
64 # profile_url = 'http://www.gencourt.state.nh.us/Senate/members/webpages/district{:02d}.aspx'.format(row['District'])
65 #elif chamber == 'lower':
66 # profile_url = 'http://www.gencourt.state.nh.us/house/members/member.aspx?member={}'.format(row['employee_no'])
67
68 #if profile_url:
69 # legislator['photo_url'] = self._get_photo(profile_url, chamber)
70 # legislator.add_source(profile_url)
71
72 return legislator
73
74 def _parse_members_txt(self):
75 lines = self.get(self.members_url).text.splitlines()
76
77 header = lines[0].split('\t')
78
79 for line in lines[1:]:
80 yield dict(zip(header, line.split('\t')))
81
82 def scrape(self, chamber, term):
83 for row in self._parse_members_txt():
84 if self.chamber_map[row['LegislativeBody']] == chamber:
85 leg = self._parse_legislator(row, chamber, term)
86 leg.add_source(self.members_url)
87 self.save_legislator(leg)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/nh/legislators.py b/openstates/nh/legislators.py
--- a/openstates/nh/legislators.py
+++ b/openstates/nh/legislators.py
@@ -38,13 +38,13 @@
# Capture legislator vitals.
first_name = row['FirstName']
middle_name = row['MiddleName']
- last_name = row['lastname']
+ last_name = row['LastName']
full_name = '{} {} {}'.format(first_name, middle_name, last_name)
full_name = re.sub(r'[\s]{2,}', ' ', full_name)
- district = '{} {}'.format(row['county'], int(row['District'])).strip()
- party = self.party_map[row['party']]
- email = row['EMailAddress1']
+ district = '{} {}'.format(row['County'], int(row['District'])).strip()
+ party = self.party_map[row['party'].upper()]
+ email = row['WorkEmail']
legislator = Legislator(term, chamber, district, full_name,
first_name=first_name, last_name=last_name,
@@ -52,8 +52,8 @@
email=email)
# Capture legislator office contact information.
- district_address = '{}\n{}\n{}, {} {}'.format(row['street'],
- row['address2'], row['city'], row['state'], row['zipcode']).strip()
+ district_address = '{}\n{}\n{}, {} {}'.format(row['Address'],
+ row['address2'], row['city'], row['State'], row['Zipcode']).strip()
legislator.add_office('district', 'Home Address',
address=district_address)
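The NH fix boils down to the column names in `Members.txt` having changed labels and case (`LastName`, `County`, `WorkEmail`, and a party code that now needs upper-casing). The sketch below parses one tab-separated row with the post-patch names; the sample row is made up for illustration, and the `csv` module stands in for the scraper's own `str.split('\t')` parsing.

```python
import csv
import io

# One made-up row, using the post-patch column names from Members.txt.
sample = (
    "FirstName\tMiddleName\tLastName\tCounty\tDistrict\tparty\tWorkEmail\n"
    "Jane\tQ\tDoe\tHillsborough\t07\td\tjane.doe@example.com\n"
)

for row in csv.DictReader(io.StringIO(sample), delimiter="\t"):
    district = "{} {}".format(row["County"], int(row["District"])).strip()
    party_code = row["party"].upper()  # the patch upper-cases the code before the party_map lookup
    print(row["LastName"], district, party_code, row["WorkEmail"])
```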
| {"golden_diff": "diff --git a/openstates/nh/legislators.py b/openstates/nh/legislators.py\n--- a/openstates/nh/legislators.py\n+++ b/openstates/nh/legislators.py\n@@ -38,13 +38,13 @@\n # Capture legislator vitals.\n first_name = row['FirstName']\n middle_name = row['MiddleName']\n- last_name = row['lastname']\n+ last_name = row['LastName']\n full_name = '{} {} {}'.format(first_name, middle_name, last_name)\n full_name = re.sub(r'[\\s]{2,}', ' ', full_name)\n \n- district = '{} {}'.format(row['county'], int(row['District'])).strip()\n- party = self.party_map[row['party']]\n- email = row['EMailAddress1']\n+ district = '{} {}'.format(row['County'], int(row['District'])).strip()\n+ party = self.party_map[row['party'].upper()]\n+ email = row['WorkEmail']\n \n legislator = Legislator(term, chamber, district, full_name,\n first_name=first_name, last_name=last_name,\n@@ -52,8 +52,8 @@\n email=email)\n \n # Capture legislator office contact information.\n- district_address = '{}\\n{}\\n{}, {} {}'.format(row['street'],\n- row['address2'], row['city'], row['state'], row['zipcode']).strip()\n+ district_address = '{}\\n{}\\n{}, {} {}'.format(row['Address'],\n+ row['address2'], row['city'], row['State'], row['Zipcode']).strip()\n \n legislator.add_office('district', 'Home Address',\n address=district_address)\n", "issue": "NH: legislators scrape is failing\nState: NH\r\n\r\nLegislator scraper needs some attention\n", "before_files": [{"content": "import re\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\n\nclass NHLegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'nh'\n latest_only = True\n members_url = 'http://www.gencourt.state.nh.us/downloads/Members.txt'\n\n chamber_map = {'H': 'lower', 'S': 'upper'}\n party_map = {\n 'D': 'Democratic',\n 'R': 'Republican',\n 'I': 'Independent',\n 'L': 'Libertarian',\n }\n\n def _get_photo(self, url, chamber):\n \"\"\"Attempts to find a portrait in the given legislator profile.\"\"\"\n doc = self.lxmlize(url)\n\n if chamber == 'upper':\n src = doc.xpath('//div[@id=\"page_content\"]//img[contains(@src, '\n '\"images/senators\") or contains(@src, \"Senator\")]/@src')\n elif chamber == 'lower':\n src = doc.xpath('//img[contains(@src, \"images/memberpics\")]/@src')\n\n if src and 'nophoto' not in src[0]:\n photo_url = src[0]\n else:\n photo_url = ''\n\n return photo_url\n\n def _parse_legislator(self, row, chamber, term):\n # Capture legislator vitals.\n first_name = row['FirstName']\n middle_name = row['MiddleName']\n last_name = row['lastname']\n full_name = '{} {} {}'.format(first_name, middle_name, last_name)\n full_name = re.sub(r'[\\s]{2,}', ' ', full_name)\n\n district = '{} {}'.format(row['county'], int(row['District'])).strip()\n party = self.party_map[row['party']]\n email = row['EMailAddress1']\n\n legislator = Legislator(term, chamber, district, full_name,\n first_name=first_name, last_name=last_name,\n middle_name=middle_name, party=party,\n email=email)\n\n # Capture legislator office contact information.\n district_address = '{}\\n{}\\n{}, {} {}'.format(row['street'],\n row['address2'], row['city'], row['state'], row['zipcode']).strip()\n\n legislator.add_office('district', 'Home Address',\n address=district_address)\n\n # Retrieve legislator portrait.\n #profile_url = None\n #if chamber == 'upper':\n # profile_url = 'http://www.gencourt.state.nh.us/Senate/members/webpages/district{:02d}.aspx'.format(row['District'])\n #elif chamber == 'lower':\n # profile_url = 
'http://www.gencourt.state.nh.us/house/members/member.aspx?member={}'.format(row['employee_no'])\n\n #if profile_url:\n # legislator['photo_url'] = self._get_photo(profile_url, chamber)\n # legislator.add_source(profile_url)\n\n return legislator\n\n def _parse_members_txt(self):\n lines = self.get(self.members_url).text.splitlines()\n\n header = lines[0].split('\\t')\n\n for line in lines[1:]:\n yield dict(zip(header, line.split('\\t')))\n\n def scrape(self, chamber, term):\n for row in self._parse_members_txt():\n if self.chamber_map[row['LegislativeBody']] == chamber:\n leg = self._parse_legislator(row, chamber, term)\n leg.add_source(self.members_url)\n self.save_legislator(leg)\n", "path": "openstates/nh/legislators.py"}], "after_files": [{"content": "import re\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\n\nclass NHLegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'nh'\n latest_only = True\n members_url = 'http://www.gencourt.state.nh.us/downloads/Members.txt'\n\n chamber_map = {'H': 'lower', 'S': 'upper'}\n party_map = {\n 'D': 'Democratic',\n 'R': 'Republican',\n 'I': 'Independent',\n 'L': 'Libertarian',\n }\n\n def _get_photo(self, url, chamber):\n \"\"\"Attempts to find a portrait in the given legislator profile.\"\"\"\n doc = self.lxmlize(url)\n\n if chamber == 'upper':\n src = doc.xpath('//div[@id=\"page_content\"]//img[contains(@src, '\n '\"images/senators\") or contains(@src, \"Senator\")]/@src')\n elif chamber == 'lower':\n src = doc.xpath('//img[contains(@src, \"images/memberpics\")]/@src')\n\n if src and 'nophoto' not in src[0]:\n photo_url = src[0]\n else:\n photo_url = ''\n\n return photo_url\n\n def _parse_legislator(self, row, chamber, term):\n # Capture legislator vitals.\n first_name = row['FirstName']\n middle_name = row['MiddleName']\n last_name = row['LastName']\n full_name = '{} {} {}'.format(first_name, middle_name, last_name)\n full_name = re.sub(r'[\\s]{2,}', ' ', full_name)\n\n district = '{} {}'.format(row['County'], int(row['District'])).strip()\n party = self.party_map[row['party'].upper()]\n email = row['WorkEmail']\n\n legislator = Legislator(term, chamber, district, full_name,\n first_name=first_name, last_name=last_name,\n middle_name=middle_name, party=party,\n email=email)\n\n # Capture legislator office contact information.\n district_address = '{}\\n{}\\n{}, {} {}'.format(row['Address'],\n row['address2'], row['city'], row['State'], row['Zipcode']).strip()\n\n legislator.add_office('district', 'Home Address',\n address=district_address)\n\n # Retrieve legislator portrait.\n #profile_url = None\n #if chamber == 'upper':\n # profile_url = 'http://www.gencourt.state.nh.us/Senate/members/webpages/district{:02d}.aspx'.format(row['District'])\n #elif chamber == 'lower':\n # profile_url = 'http://www.gencourt.state.nh.us/house/members/member.aspx?member={}'.format(row['employee_no'])\n\n #if profile_url:\n # legislator['photo_url'] = self._get_photo(profile_url, chamber)\n # legislator.add_source(profile_url)\n\n return legislator\n\n def _parse_members_txt(self):\n lines = self.get(self.members_url).text.splitlines()\n\n header = lines[0].split('\\t')\n\n for line in lines[1:]:\n yield dict(zip(header, line.split('\\t')))\n\n def scrape(self, chamber, term):\n for row in self._parse_members_txt():\n if self.chamber_map[row['LegislativeBody']] == chamber:\n leg = self._parse_legislator(row, chamber, term)\n leg.add_source(self.members_url)\n self.save_legislator(leg)\n", 
"path": "openstates/nh/legislators.py"}]} | 1,234 | 378 |
gh_patches_debug_25734 | rasdani/github-patches | git_diff | GPflow__GPflow-1446 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ChangePoints kernel breaks for len(X) != len(X2)
# Bug
There seems to be a bug in models that use the ChangePoints kernel: given a model
m = gpflow.models.GPR(data=(X, y), kernel=k, mean_function=None)
with k a ChangePoints kernel (built from any combination of base kernels), the model won't produce a value for predict_y.
## To reproduce
**Minimal, reproducible example**
```python
import numpy as np
import gpflow
X = np.linspace(0,100,100).reshape(100,1)
base_k1 = gpflow.kernels.Matern32(lengthscales=0.2)
base_k2 = gpflow.kernels.Matern32(lengthscales=2.0)
k = gpflow.kernels.ChangePoints([base_k1, base_k2], [0.0], steepness=5.0)
k(X) # works
N = 25 # anything other than N=100 will reproduce the bug
xx = np.linspace(0,50,N).reshape(N,1)
k(X, xx) # breaks
```
**Stack trace, or error message**
```
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-25-d1dbc7941bae> in <module>
----> 1 k(X, xx)
~/Code/GPflow/gpflow/kernels/base.py in __call__(self, X, X2, full_cov, presliced)
170
171 else:
--> 172 return self.K(X, X2)
173
174 def __add__(self, other):
~/Code/GPflow/gpflow/kernels/changepoints.py in K(self, X, X2)
83 N = tf.shape(X)[0]
84 ones = tf.ones((N, N, 1), dtype=X.dtype)
---> 85 starters = tf.concat([ones, starters], axis=2)
86 stoppers = tf.concat([stoppers, ones], axis=2)
87
~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/util/dispatch.py in wrapper(*args, **kwargs)
178 """Call target, and fall back on dispatchers if there is a TypeError."""
179 try:
--> 180 return target(*args, **kwargs)
181 except (TypeError, ValueError):
182 # Note: convert_to_eager_tensor currently raises a ValueError, not a
~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/ops/array_ops.py in concat(values, axis, name)
1515 dtype=dtypes.int32).get_shape().assert_has_rank(0)
1516 return identity(values[0], name=name)
-> 1517 return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
1518
1519
~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/ops/gen_array_ops.py in concat_v2(values, axis, name)
1116 pass # Add nodes to the TensorFlow graph.
1117 except _core._NotOkStatusException as e:
-> 1118 _ops.raise_from_not_ok_status(e, name)
1119 # Add nodes to the TensorFlow graph.
1120 if not isinstance(values, (list, tuple)):
~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py in raise_from_not_ok_status(e, name)
6604 message = e.message + (" name: " + name if name is not None else "")
6605 # pylint: disable=protected-access
-> 6606 six.raise_from(core._status_to_exception(e.code, message), None)
6607 # pylint: enable=protected-access
6608
~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/six.py in raise_from(value, from_value)
InvalidArgumentError: ConcatOp : Dimensions of inputs should match: shape[0] = [100,100,1] vs. shape[1] = [100,25,1] [Op:ConcatV2] name: concat
```
## Expected behavior
Should return the kernel matrix of shape (100, 25).
## System information
* GPflow version: 2.0.1 (reproduced on `develop`)
* GPflow installed from: 'pip install gpflow'
* TensorFlow version: 2.1.0
* Python version 3.7.5
* Operating system Ubuntu Linux (18.04.3)
## Additional context
For me, changing these two lines in `gpflow/kernels/changepoints.py` did the trick:
```python
def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:
sig_X = self._sigmoids(X) # N x 1 x Ncp
sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X
# `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go
# from 1 -> 0, dimensions are N x N x Ncp
starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))
stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))
# prepend `starters` with ones and append ones to `stoppers` since the
# first kernel has no start and the last kernel has no end
N = tf.shape(X)[0]
M = tf.shape(X2)[0] if X2 is not None else N # THIS IS THE FIX
ones = tf.ones((N, M, 1), dtype=X.dtype) #PREVIOUSLY N WAS IN PLACE OF M HERE
starters = tf.concat([ones, starters], axis=2)
stoppers = tf.concat([stoppers, ones], axis=2)
```
However I have not run any kind of tests to make sure this would not break anything else (neither have I added a unit test for the changepoint kernel). It could be that I am using the changepoint kernel incorrectly, in which case it would be very helpful if a short demo of using the kernel in a model could be appended to the changepoint kernel notebook :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpflow/kernels/changepoints.py`
Content:
```
1 from collections.abc import Iterable
2 from typing import List, Optional, Union
3
4 import tensorflow as tf
5
6 from ..base import Parameter
7 from ..utilities import positive
8 from .base import Combination, Kernel
9
10
11 class ChangePoints(Combination):
12 r"""
13 The ChangePoints kernel defines a fixed number of change-points along a 1d
14 input space where different kernels govern different parts of the space.
15
16 The kernel is by multiplication and addition of the base kernels with
17 sigmoid functions (σ). A single change-point kernel is defined as:
18
19 K₁(x, x') * (1 - σ(x)) * (1 - σ(x')) + K₂(x, x') * σ(x) * σ(x')
20
21 where K₁ is deactivated around the change-point and K₂ is activated. The
22 single change-point version can be found in \citet{lloyd2014}. Each sigmoid
23 is a logistic function defined as:
24
25 σ(x) = 1 / (1 + exp{-s(x - x₀)})
26
27 parameterized by location "x₀" and steepness "s".
28
29 @incollection{lloyd2014,
30 author = {Lloyd, James Robert et al},
31 title = {Automatic Construction and Natural-language Description of Nonparametric Regression Models},
32 booktitle = {Proceedings of the Twenty-Eighth AAAI Conference on Artificial Intelligence},
33 year = {2014},
34 url = {http://dl.acm.org/citation.cfm?id=2893873.2894066},
35 }
36 """
37
38 def __init__(
39 self,
40 kernels: List[Kernel],
41 locations: List[float],
42 steepness: Union[float, List[float]] = 1.0,
43 name: Optional[str] = None,
44 ):
45 """
46 :param kernels: list of kernels defining the different regimes
47 :param locations: list of change-point locations in the 1d input space
48 :param steepness: the steepness parameter(s) of the sigmoids, this can be
49 common between them or decoupled
50 """
51 if len(kernels) != len(locations) + 1:
52 raise ValueError(
53 "Number of kernels ({nk}) must be one more than the number of "
54 "changepoint locations ({nl})".format(nk=len(kernels), nl=len(locations))
55 )
56
57 if isinstance(steepness, Iterable) and len(steepness) != len(locations):
58 raise ValueError(
59 "Dimension of steepness ({ns}) does not match number of changepoint "
60 "locations ({nl})".format(ns=len(steepness), nl=len(locations))
61 )
62
63 super().__init__(kernels, name=name)
64
65 self.locations = Parameter(locations)
66 self.steepness = Parameter(steepness, transform=positive())
67
68 def _set_kernels(self, kernels: List[Kernel]):
69 # it is not clear how to flatten out nested change-points
70 self.kernels = kernels
71
72 def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:
73 sig_X = self._sigmoids(X) # N x 1 x Ncp
74 sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X
75
76 # `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go
77 # from 1 -> 0, dimensions are N x N x Ncp
78 starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))
79 stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))
80
81 # prepend `starters` with ones and append ones to `stoppers` since the
82 # first kernel has no start and the last kernel has no end
83 N = tf.shape(X)[0]
84 ones = tf.ones((N, N, 1), dtype=X.dtype)
85 starters = tf.concat([ones, starters], axis=2)
86 stoppers = tf.concat([stoppers, ones], axis=2)
87
88 # now combine with the underlying kernels
89 kernel_stack = tf.stack([k(X, X2) for k in self.kernels], axis=2)
90 return tf.reduce_sum(kernel_stack * starters * stoppers, axis=2)
91
92 def K_diag(self, X: tf.Tensor) -> tf.Tensor:
93 N = tf.shape(X)[0]
94 sig_X = tf.reshape(self._sigmoids(X), (N, -1)) # N x Ncp
95
96 ones = tf.ones((N, 1), dtype=X.dtype)
97 starters = tf.concat([ones, sig_X * sig_X], axis=1) # N x Ncp
98 stoppers = tf.concat([(1 - sig_X) * (1 - sig_X), ones], axis=1)
99
100 kernel_stack = tf.stack([k(X, full_cov=False) for k in self.kernels], axis=1)
101 return tf.reduce_sum(kernel_stack * starters * stoppers, axis=1)
102
103 def _sigmoids(self, X: tf.Tensor) -> tf.Tensor:
104 locations = tf.sort(self.locations) # ensure locations are ordered
105 locations = tf.reshape(locations, (1, 1, -1))
106 steepness = tf.reshape(self.steepness, (1, 1, -1))
107 return tf.sigmoid(steepness * (X[:, :, None] - locations))
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gpflow/kernels/changepoints.py b/gpflow/kernels/changepoints.py
--- a/gpflow/kernels/changepoints.py
+++ b/gpflow/kernels/changepoints.py
@@ -70,18 +70,19 @@
self.kernels = kernels
def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:
- sig_X = self._sigmoids(X) # N x 1 x Ncp
- sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X
+ sig_X = self._sigmoids(X) # N1 x 1 x Ncp
+ sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X # N2 x 1 x Ncp
# `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go
- # from 1 -> 0, dimensions are N x N x Ncp
+ # from 1 -> 0, dimensions are N1 x N2 x Ncp
starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))
stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))
# prepend `starters` with ones and append ones to `stoppers` since the
# first kernel has no start and the last kernel has no end
- N = tf.shape(X)[0]
- ones = tf.ones((N, N, 1), dtype=X.dtype)
+ N1 = tf.shape(X)[0]
+ N2 = tf.shape(X2)[0] if X2 is not None else N1
+ ones = tf.ones((N1, N2, 1), dtype=X.dtype)
starters = tf.concat([ones, starters], axis=2)
stoppers = tf.concat([stoppers, ones], axis=2)
| {"golden_diff": "diff --git a/gpflow/kernels/changepoints.py b/gpflow/kernels/changepoints.py\n--- a/gpflow/kernels/changepoints.py\n+++ b/gpflow/kernels/changepoints.py\n@@ -70,18 +70,19 @@\n self.kernels = kernels\n \n def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:\n- sig_X = self._sigmoids(X) # N x 1 x Ncp\n- sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X\n+ sig_X = self._sigmoids(X) # N1 x 1 x Ncp\n+ sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X # N2 x 1 x Ncp\n \n # `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go\n- # from 1 -> 0, dimensions are N x N x Ncp\n+ # from 1 -> 0, dimensions are N1 x N2 x Ncp\n starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))\n stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))\n \n # prepend `starters` with ones and append ones to `stoppers` since the\n # first kernel has no start and the last kernel has no end\n- N = tf.shape(X)[0]\n- ones = tf.ones((N, N, 1), dtype=X.dtype)\n+ N1 = tf.shape(X)[0]\n+ N2 = tf.shape(X2)[0] if X2 is not None else N1\n+ ones = tf.ones((N1, N2, 1), dtype=X.dtype)\n starters = tf.concat([ones, starters], axis=2)\n stoppers = tf.concat([stoppers, ones], axis=2)\n", "issue": "ChangePoints kernel breaks for len(X) != len(X2)\n<!-- Lines like this are comments and will be invisible -->\r\n\r\n# Bug \r\nThere seems to be a bug for models using the Changepoints kernel, whereby given a model \r\nm =gpflow.models.GPR(data=(X,y), kernel=k, mean_function=None)\r\nwith k a Changepoint kernel, (using a combination of any base kernels), the model won't produce a value for predict_y. \r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n## To reproduce\r\n\r\n**Minimal, reproducible example**\r\n<!-- We need to be able to reproduce the bug by simply copy and pasting your code -->\r\n```python\r\nimport numpy as np\r\nimport gpflow\r\nX = np.linspace(0,100,100).reshape(100,1)\r\nbase_k1 = gpflow.kernels.Matern32(lengthscales=0.2)\r\nbase_k2 = gpflow.kernels.Matern32(lengthscales=2.0)\r\nk = gpflow.kernels.ChangePoints([base_k1, base_k2], [0.0], steepness=5.0)\r\nk(X) # works\r\n\r\nN = 25 # anything other than N=100 will reproduce the bug\r\nxx = np.linspace(0,50,N).reshape(N,1)\r\nk(X, xx) # breaks\r\n```\r\n\r\n**Stack trace, or error message**\r\n```\r\n---------------------------------------------------------------------------\r\nInvalidArgumentError Traceback (most recent call last)\r\n<ipython-input-25-d1dbc7941bae> in <module>\r\n----> 1 k(X, xx)\r\n\r\n~/Code/GPflow/gpflow/kernels/base.py in __call__(self, X, X2, full_cov, presliced)\r\n 170 \r\n 171 else:\r\n--> 172 return self.K(X, X2)\r\n 173 \r\n 174 def __add__(self, other):\r\n\r\n~/Code/GPflow/gpflow/kernels/changepoints.py in K(self, X, X2)\r\n 83 N = tf.shape(X)[0]\r\n 84 ones = tf.ones((N, N, 1), dtype=X.dtype)\r\n---> 85 starters = tf.concat([ones, starters], axis=2)\r\n 86 stoppers = tf.concat([stoppers, ones], axis=2)\r\n 87 \r\n\r\n~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/util/dispatch.py in wrapper(*args, **kwargs)\r\n 178 \"\"\"Call target, and fall back on dispatchers if there is a TypeError.\"\"\"\r\n 179 try:\r\n--> 180 return target(*args, **kwargs)\r\n 181 except (TypeError, ValueError):\r\n 182 # Note: convert_to_eager_tensor currently raises a ValueError, not a\r\n\r\n~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/ops/array_ops.py in concat(values, axis, name)\r\n 1515 
dtype=dtypes.int32).get_shape().assert_has_rank(0)\r\n 1516 return identity(values[0], name=name)\r\n-> 1517 return gen_array_ops.concat_v2(values=values, axis=axis, name=name)\r\n 1518 \r\n 1519 \r\n\r\n~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/ops/gen_array_ops.py in concat_v2(values, axis, name)\r\n 1116 pass # Add nodes to the TensorFlow graph.\r\n 1117 except _core._NotOkStatusException as e:\r\n-> 1118 _ops.raise_from_not_ok_status(e, name)\r\n 1119 # Add nodes to the TensorFlow graph.\r\n 1120 if not isinstance(values, (list, tuple)):\r\n\r\n~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py in raise_from_not_ok_status(e, name)\r\n 6604 message = e.message + (\" name: \" + name if name is not None else \"\")\r\n 6605 # pylint: disable=protected-access\r\n-> 6606 six.raise_from(core._status_to_exception(e.code, message), None)\r\n 6607 # pylint: enable=protected-access\r\n 6608 \r\n\r\n~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/six.py in raise_from(value, from_value)\r\n\r\nInvalidArgumentError: ConcatOp : Dimensions of inputs should match: shape[0] = [100,100,1] vs. shape[1] = [100,25,1] [Op:ConcatV2] name: concat\r\n```\r\n\r\n## Expected behavior\r\n\r\nShould return the kernel matrix of shape (100, 25).\r\n\r\n## System information\r\n\r\n* GPflow version: 2.0.1 (reproduced on `develop`)\r\n* GPflow installed from: 'pip install gpflow'\r\n* TensorFlow version: 2.1.0\r\n* Python version 3.7.5\r\n* Operating system Ubuntu Linux (18.04.3)\r\n\r\n## Additional context\r\n\r\nFor me changing these two lines in changepoints.py or kernels did the trick: \r\n\r\n```python\r\n def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:\r\n sig_X = self._sigmoids(X) # N x 1 x Ncp\r\n sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X\r\n\r\n # `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go\r\n # from 1 -> 0, dimensions are N x N x Ncp\r\n starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))\r\n stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))\r\n\r\n # prepend `starters` with ones and append ones to `stoppers` since the\r\n # first kernel has no start and the last kernel has no end\r\n N = tf.shape(X)[0]\r\n M = tf.shape(X2)[0] if X2 is not None else N # THIS IS THE FIX\r\n ones = tf.ones((N, M, 1), dtype=X.dtype) #PREVIOUSLY N WAS IN PLACE OF M HERE\r\n starters = tf.concat([ones, starters], axis=2)\r\n stoppers = tf.concat([stoppers, ones], axis=2)\r\n```\r\n\r\nHowever I have not run any kind of tests to make sure this would not break anything else (neither have I added a unit test for the changepoint kernel). It could be that I am using the changepoint kernel incorrectly, in which case it would be very helpful if a short demo of using the kernel in a model could be appended to the changepoint kernel notebook :)\r\n\n", "before_files": [{"content": "from collections.abc import Iterable\nfrom typing import List, Optional, Union\n\nimport tensorflow as tf\n\nfrom ..base import Parameter\nfrom ..utilities import positive\nfrom .base import Combination, Kernel\n\n\nclass ChangePoints(Combination):\n r\"\"\"\n The ChangePoints kernel defines a fixed number of change-points along a 1d\n input space where different kernels govern different parts of the space.\n\n The kernel is by multiplication and addition of the base kernels with\n sigmoid functions (\u03c3). 
A single change-point kernel is defined as:\n\n K\u2081(x, x') * (1 - \u03c3(x)) * (1 - \u03c3(x')) + K\u2082(x, x') * \u03c3(x) * \u03c3(x')\n\n where K\u2081 is deactivated around the change-point and K\u2082 is activated. The\n single change-point version can be found in \\citet{lloyd2014}. Each sigmoid\n is a logistic function defined as:\n\n \u03c3(x) = 1 / (1 + exp{-s(x - x\u2080)})\n\n parameterized by location \"x\u2080\" and steepness \"s\".\n\n @incollection{lloyd2014,\n author = {Lloyd, James Robert et al},\n title = {Automatic Construction and Natural-language Description of Nonparametric Regression Models},\n booktitle = {Proceedings of the Twenty-Eighth AAAI Conference on Artificial Intelligence},\n year = {2014},\n url = {http://dl.acm.org/citation.cfm?id=2893873.2894066},\n }\n \"\"\"\n\n def __init__(\n self,\n kernels: List[Kernel],\n locations: List[float],\n steepness: Union[float, List[float]] = 1.0,\n name: Optional[str] = None,\n ):\n \"\"\"\n :param kernels: list of kernels defining the different regimes\n :param locations: list of change-point locations in the 1d input space\n :param steepness: the steepness parameter(s) of the sigmoids, this can be\n common between them or decoupled\n \"\"\"\n if len(kernels) != len(locations) + 1:\n raise ValueError(\n \"Number of kernels ({nk}) must be one more than the number of \"\n \"changepoint locations ({nl})\".format(nk=len(kernels), nl=len(locations))\n )\n\n if isinstance(steepness, Iterable) and len(steepness) != len(locations):\n raise ValueError(\n \"Dimension of steepness ({ns}) does not match number of changepoint \"\n \"locations ({nl})\".format(ns=len(steepness), nl=len(locations))\n )\n\n super().__init__(kernels, name=name)\n\n self.locations = Parameter(locations)\n self.steepness = Parameter(steepness, transform=positive())\n\n def _set_kernels(self, kernels: List[Kernel]):\n # it is not clear how to flatten out nested change-points\n self.kernels = kernels\n\n def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:\n sig_X = self._sigmoids(X) # N x 1 x Ncp\n sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X\n\n # `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go\n # from 1 -> 0, dimensions are N x N x Ncp\n starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))\n stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))\n\n # prepend `starters` with ones and append ones to `stoppers` since the\n # first kernel has no start and the last kernel has no end\n N = tf.shape(X)[0]\n ones = tf.ones((N, N, 1), dtype=X.dtype)\n starters = tf.concat([ones, starters], axis=2)\n stoppers = tf.concat([stoppers, ones], axis=2)\n\n # now combine with the underlying kernels\n kernel_stack = tf.stack([k(X, X2) for k in self.kernels], axis=2)\n return tf.reduce_sum(kernel_stack * starters * stoppers, axis=2)\n\n def K_diag(self, X: tf.Tensor) -> tf.Tensor:\n N = tf.shape(X)[0]\n sig_X = tf.reshape(self._sigmoids(X), (N, -1)) # N x Ncp\n\n ones = tf.ones((N, 1), dtype=X.dtype)\n starters = tf.concat([ones, sig_X * sig_X], axis=1) # N x Ncp\n stoppers = tf.concat([(1 - sig_X) * (1 - sig_X), ones], axis=1)\n\n kernel_stack = tf.stack([k(X, full_cov=False) for k in self.kernels], axis=1)\n return tf.reduce_sum(kernel_stack * starters * stoppers, axis=1)\n\n def _sigmoids(self, X: tf.Tensor) -> tf.Tensor:\n locations = tf.sort(self.locations) # ensure locations are ordered\n locations = tf.reshape(locations, (1, 1, -1))\n steepness = tf.reshape(self.steepness, (1, 1, -1))\n return 
tf.sigmoid(steepness * (X[:, :, None] - locations))\n", "path": "gpflow/kernels/changepoints.py"}], "after_files": [{"content": "from collections.abc import Iterable\nfrom typing import List, Optional, Union\n\nimport tensorflow as tf\n\nfrom ..base import Parameter\nfrom ..utilities import positive\nfrom .base import Combination, Kernel\n\n\nclass ChangePoints(Combination):\n r\"\"\"\n The ChangePoints kernel defines a fixed number of change-points along a 1d\n input space where different kernels govern different parts of the space.\n\n The kernel is by multiplication and addition of the base kernels with\n sigmoid functions (\u03c3). A single change-point kernel is defined as:\n\n K\u2081(x, x') * (1 - \u03c3(x)) * (1 - \u03c3(x')) + K\u2082(x, x') * \u03c3(x) * \u03c3(x')\n\n where K\u2081 is deactivated around the change-point and K\u2082 is activated. The\n single change-point version can be found in \\citet{lloyd2014}. Each sigmoid\n is a logistic function defined as:\n\n \u03c3(x) = 1 / (1 + exp{-s(x - x\u2080)})\n\n parameterized by location \"x\u2080\" and steepness \"s\".\n\n @incollection{lloyd2014,\n author = {Lloyd, James Robert et al},\n title = {Automatic Construction and Natural-language Description of Nonparametric Regression Models},\n booktitle = {Proceedings of the Twenty-Eighth AAAI Conference on Artificial Intelligence},\n year = {2014},\n url = {http://dl.acm.org/citation.cfm?id=2893873.2894066},\n }\n \"\"\"\n\n def __init__(\n self,\n kernels: List[Kernel],\n locations: List[float],\n steepness: Union[float, List[float]] = 1.0,\n name: Optional[str] = None,\n ):\n \"\"\"\n :param kernels: list of kernels defining the different regimes\n :param locations: list of change-point locations in the 1d input space\n :param steepness: the steepness parameter(s) of the sigmoids, this can be\n common between them or decoupled\n \"\"\"\n if len(kernels) != len(locations) + 1:\n raise ValueError(\n \"Number of kernels ({nk}) must be one more than the number of \"\n \"changepoint locations ({nl})\".format(nk=len(kernels), nl=len(locations))\n )\n\n if isinstance(steepness, Iterable) and len(steepness) != len(locations):\n raise ValueError(\n \"Dimension of steepness ({ns}) does not match number of changepoint \"\n \"locations ({nl})\".format(ns=len(steepness), nl=len(locations))\n )\n\n super().__init__(kernels, name=name)\n\n self.locations = Parameter(locations)\n self.steepness = Parameter(steepness, transform=positive())\n\n def _set_kernels(self, kernels: List[Kernel]):\n # it is not clear how to flatten out nested change-points\n self.kernels = kernels\n\n def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:\n sig_X = self._sigmoids(X) # N1 x 1 x Ncp\n sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X # N2 x 1 x Ncp\n\n # `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go\n # from 1 -> 0, dimensions are N1 x N2 x Ncp\n starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))\n stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))\n\n # prepend `starters` with ones and append ones to `stoppers` since the\n # first kernel has no start and the last kernel has no end\n N1 = tf.shape(X)[0]\n N2 = tf.shape(X2)[0] if X2 is not None else N1\n ones = tf.ones((N1, N2, 1), dtype=X.dtype)\n starters = tf.concat([ones, starters], axis=2)\n stoppers = tf.concat([stoppers, ones], axis=2)\n\n # now combine with the underlying kernels\n kernel_stack = tf.stack([k(X, X2) for k in self.kernels], axis=2)\n return 
tf.reduce_sum(kernel_stack * starters * stoppers, axis=2)\n\n def K_diag(self, X: tf.Tensor) -> tf.Tensor:\n N = tf.shape(X)[0]\n sig_X = tf.reshape(self._sigmoids(X), (N, -1)) # N x Ncp\n\n ones = tf.ones((N, 1), dtype=X.dtype)\n starters = tf.concat([ones, sig_X * sig_X], axis=1) # N x Ncp\n stoppers = tf.concat([(1 - sig_X) * (1 - sig_X), ones], axis=1)\n\n kernel_stack = tf.stack([k(X, full_cov=False) for k in self.kernels], axis=1)\n return tf.reduce_sum(kernel_stack * starters * stoppers, axis=1)\n\n def _sigmoids(self, X: tf.Tensor) -> tf.Tensor:\n locations = tf.sort(self.locations) # ensure locations are ordered\n locations = tf.reshape(locations, (1, 1, -1))\n steepness = tf.reshape(self.steepness, (1, 1, -1))\n return tf.sigmoid(steepness * (X[:, :, None] - locations))\n", "path": "gpflow/kernels/changepoints.py"}]} | 3,268 | 462 |
gh_patches_debug_2764 | rasdani/github-patches | git_diff | ivy-llc__ivy-18208 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
expand
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/manipulation.py`
Content:
```
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6 from ivy.func_wrapper import (
7 with_unsupported_dtypes,
8 with_supported_dtypes,
9 )
10
11
12 @to_ivy_arrays_and_back
13 def reshape(x, shape):
14 return ivy.reshape(x, shape)
15
16
17 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
18 @to_ivy_arrays_and_back
19 def abs(x, name=None):
20 return ivy.abs(x)
21
22
23 absolute = abs
24
25
26 @to_ivy_arrays_and_back
27 def stack(x, axis=0, name=None):
28 return ivy.stack(x, axis=axis)
29
30
31 @with_unsupported_dtypes({"2.5.0 and below": ("int8", "int16")}, "paddle")
32 @to_ivy_arrays_and_back
33 def concat(x, axis, name=None):
34 return ivy.concat(x, axis=axis)
35
36
37 @with_unsupported_dtypes(
38 {"2.5.0 and below": ("int8", "uint8", "int16", "float16")},
39 "paddle",
40 )
41 @to_ivy_arrays_and_back
42 def tile(x, repeat_times, name=None):
43 return ivy.tile(x, repeats=repeat_times)
44
45
46 @with_unsupported_dtypes(
47 {"2.5.0 and below": ("int16", "complex64", "complex128")},
48 "paddle",
49 )
50 @to_ivy_arrays_and_back
51 def split(x, num_or_sections, axis=0, name=None):
52 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)
53
54
55 @with_unsupported_dtypes(
56 {"2.5.0 and below": ("float16", "bfloat16", "int8", "int16")},
57 "paddle",
58 )
59 @to_ivy_arrays_and_back
60 def squeeze(x, axis=None, name=None):
61 return ivy.squeeze(x, axis=axis)
62
63
64 @with_supported_dtypes(
65 {
66 "2.5.0 and below": (
67 "bool",
68 "float16",
69 "float32",
70 "float64",
71 "int32",
72 "int64",
73 "uint8",
74 )
75 },
76 "paddle",
77 )
78 @to_ivy_arrays_and_back
79 def cast(x, dtype):
80 return ivy.astype(x, dtype)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py
--- a/ivy/functional/frontends/paddle/tensor/manipulation.py
+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py
@@ -61,6 +61,15 @@
return ivy.squeeze(x, axis=axis)
+@with_supported_dtypes(
+ {"2.5.0 and below": ("bool", "float32", "float64", "int32", "int64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def expand(x, shape, name=None):
+ return ivy.expand(x, shape)
+
+
@with_supported_dtypes(
{
"2.5.0 and below": (
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py\n--- a/ivy/functional/frontends/paddle/tensor/manipulation.py\n+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py\n@@ -61,6 +61,15 @@\n return ivy.squeeze(x, axis=axis)\n \n \n+@with_supported_dtypes(\n+ {\"2.5.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def expand(x, shape, name=None):\n+ return ivy.expand(x, shape)\n+\n+\n @with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n", "issue": "expand\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n 
\"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef expand(x, shape, name=None):\n return ivy.expand(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py"}]} | 971 | 192 |
gh_patches_debug_16282 | rasdani/github-patches | git_diff | EleutherAI__gpt-neox-733 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Checkpointing fails to save config data because it is a `dict` not a `str`
**Describe the bug**
Saving model checkpoints fails with stack trace:
```
Traceback (most recent call last):^M
File "/mnt/nvme/home/dashiell/gpt-neox/train.py", line 27, in <module>^M
pretrain(neox_args=neox_args)^M
File "/mnt/nvme/home/dashiell/gpt-neox/megatron/training.py", line 106, in pretrain^M
iteration = train(^M
File "/mnt/nvme/home/dashiell/gpt-neox/megatron/training.py", line 613, in train^M
save_checkpoint(^M
File "/mnt/nvme/home/dashiell/gpt-neox/megatron/checkpointing.py", line 208, in save_checkpoint^M
save_ds_checkpoint(iteration, model, neox_args)^M
File "/mnt/nvme/home/dashiell/gpt-neox/megatron/checkpointing.py", line 201, in save_ds_checkpoint^M
f.write(config_data)^M
TypeError: write() argument must be str, not dict^M
```
**To Reproduce**
`python3 deepy.py --conf_dir configs 1-3B.yml local_setup.yml`
**Expected behavior**
The checkpoint should save without failing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `megatron/checkpointing.py`
Content:
```
1 # Copyright (c) 2021, EleutherAI
2 # This file is based on code by the authors denoted below and has been modified from its original version.
3 #
4 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 """Input/output checkpointing."""
19
20 import os
21 import re
22 import shutil
23 import random
24 import sys
25 import numpy as np
26
27 import torch
28 from glob import glob
29
30 from megatron import mpu
31 from megatron import print_rank_0
32 from megatron.utils import natural_sort
33 from megatron.text_generation_utils import get_batch, forward_model
34 from pathlib import Path
35 from pprint import pformat
36
37
38 def check_checkpoint_args(neox_args, checkpoint_args):
39 """Ensure fixed arguments for a model are the same for the input
40 arguments and the one retrieved from checkpoint."""
41
42 assert isinstance(checkpoint_args, dict), "args stored in checkpoint is a dict"
43 for checkpoint_arg_name, checkpoint_arg_value in checkpoint_args.items():
44 args_value = getattr(neox_args, checkpoint_arg_name)
45 error_message = "{} value from checkpoint ({}) is not equal to the currently set argument value ({}).".format(
46 checkpoint_arg_name, checkpoint_arg_value, args_value
47 )
48 assert checkpoint_arg_value == args_value, error_message
49
50
51 def do_forward_pass(neox_args, model, inference=False):
52
53 # set to eval mode
54 model_was_in_train = model.training
55 model.eval()
56
57 # get context tokens
58 # always forward full batch size
59 context_tokens_tensor = (
60 torch.arange(neox_args.seq_length + 1)
61 .repeat((neox_args.train_micro_batch_size_per_gpu, 1))
62 .cuda()
63 )
64
65 # forward
66 if inference:
67 tokens, attention_mask, position_ids = get_batch(
68 neox_args, context_tokens_tensor[:, : neox_args.seq_length]
69 )
70 model_inputs = (
71 tokens,
72 position_ids,
73 attention_mask,
74 torch.Tensor(),
75 )
76 logits, _ = forward_model(neox_args, model, model_inputs)
77 elif neox_args.is_pipe_parallel:
78 data_iterator = iter([{"text": context_tokens_tensor}])
79 _, logits = model.eval_batch(data_iter=data_iterator, return_logits=True)
80 else:
81 tokens, attention_mask, position_ids = get_batch(
82 neox_args, context_tokens_tensor[:, : neox_args.seq_length]
83 )
84 logits = model((tokens, position_ids, attention_mask))
85
86 # reset to train mode, if model was in training before
87 if model_was_in_train:
88 model.train()
89
90 if logits is not None:
91 logits = logits.detach().cpu()[
92 0
93 ] # just return first batch item (they are all equal)
94
95 return logits
96
97
98 def check_forward_pass(neox_args, model, checkpoint_logits, inference):
99 # do forward pass with loaded checkpoint
100 logits = do_forward_pass(neox_args=neox_args, model=model, inference=inference)
101
102 # check
103 if (
104 logits is not None and checkpoint_logits is not None
105 ): # this could be the case for non-final pipeline stages
106 if not (logits == checkpoint_logits).all().item():
107 if mpu.get_data_parallel_rank() == 0:
108 print(
109 " > WARNING: validate_checkpoint_forward() forward after load of checkpoint does not yield exactly same result"
110 )
111 assert (
112 torch.isclose(logits, checkpoint_logits).all().item()
113 ), "validate_checkpoint_forward() forward after load of checkpoint does not yield a close result"
114
115
116 def ensure_directory_exists(filename):
117 """Build filename's path if it does not already exists."""
118 dirname = os.path.dirname(filename)
119 if not os.path.exists(dirname):
120 os.makedirs(dirname)
121
122
123 def get_checkpoint_name(checkpoints_path, iteration, release=False, mp_rank=None):
124 """A unified checkpoint name."""
125 if release:
126 directory = "release"
127 else:
128 directory = "iter_{:07d}".format(iteration)
129 return os.path.join(
130 checkpoints_path,
131 directory,
132 "mp_rank_{:02d}".format(
133 mpu.get_model_parallel_rank() if mp_rank is None else mp_rank
134 ),
135 "model_optim_rng.pt",
136 )
137
138
139 def delete_old_checkpoints(save_dir, n_to_keep):
140 if torch.distributed.get_rank() == 0:
141 ckpt_dir_regex = r"global_step[\d]*"
142 if save_dir.endswith("/"):
143 save_dir = save_dir.strip("/")
144 all_ckpts = natural_sort(
145 [
146 i
147 for i in glob(f"{save_dir}/*")
148 if os.path.isdir(i) and re.search(ckpt_dir_regex, i)
149 ]
150 )
151 n_to_delete = len(all_ckpts) - n_to_keep
152 if n_to_delete > 0:
153 to_delete = all_ckpts[:n_to_delete]
154 print(f"WARNING: Deleting old checkpoints: \n\t{', '.join(to_delete)}")
155 for ckpt in to_delete:
156 try:
157 shutil.rmtree(ckpt)
158 except FileNotFoundError:
159 pass
160
161
162 def save_ds_checkpoint(iteration, model, neox_args):
163 """Save a model checkpoint."""
164 sd = {
165 "iteration": iteration,
166 "args": {
167 "num_layers": neox_args.num_layers,
168 "hidden_size": neox_args.hidden_size,
169 "num_attention_heads": neox_args.num_attention_heads,
170 "max_position_embeddings": neox_args.max_position_embeddings,
171 "make_vocab_size_divisible_by": neox_args.make_vocab_size_divisible_by,
172 "padded_vocab_size": neox_args.padded_vocab_size,
173 "tokenizer_type": neox_args.tokenizer_type,
174 "model_parallel_size": neox_args.model_parallel_size,
175 },
176 }
177 # rng states.
178 if not neox_args.no_save_rng:
179 sd["random_rng_state"] = random.getstate()
180 sd["np_rng_state"] = np.random.get_state()
181 sd["torch_rng_state"] = torch.get_rng_state()
182 sd["cuda_rng_state"] = torch.cuda.get_rng_state()
183 sd["rng_tracker_states"] = mpu.get_cuda_rng_tracker().get_states()
184
185 if neox_args.checkpoint_validation_with_forward_pass:
186 logits = do_forward_pass(neox_args=neox_args, model=model)
187 sd["checkpoint_validation_logits"] = logits
188
189 # checkpoint folder name
190 tag = f"global_step{iteration}"
191
192 # save checkpoint
193 model.save_checkpoint(neox_args.save, tag=tag, client_state=sd)
194
195 # save config files
196 if torch.distributed.get_rank() == 0 and neox_args.config_files is not None:
197 configs_directory = os.path.join(neox_args.save, tag, "configs")
198 os.makedirs(configs_directory, exist_ok=True)
199 for config_filename, config_data in neox_args.config_files.items():
200 with open(os.path.join(configs_directory, config_filename), "w") as f:
201 f.write(config_data)
202
203
204 def save_checkpoint(neox_args, iteration, model, optimizer, lr_scheduler):
205 """Save a model checkpoint."""
206
207 if neox_args.deepspeed:
208 save_ds_checkpoint(iteration, model, neox_args)
209 else:
210 raise ValueError("Must be using deepspeed to use neox")
211
212 # Wait so everyone is done (necessary)
213 torch.distributed.barrier()
214 if neox_args.keep_last_n_checkpoints is not None:
215 delete_old_checkpoints(neox_args.save, neox_args.keep_last_n_checkpoints)
216
217 # Wait so everyone is done (not necessary)
218 torch.distributed.barrier()
219
220
221 def load_checkpoint(
222 neox_args, model, optimizer, lr_scheduler, inference=False, iteration=None
223 ):
224 """Load a model checkpoint and return the iteration."""
225 if neox_args.deepspeed:
226 load_optim_and_scheduler = (
227 not neox_args.no_load_optim
228 ) # TODO: These should be configured by separate args
229 if neox_args.finetune:
230 load_optim_and_scheduler = False
231 if iteration is not None:
232 tag = f"global_step{iteration}"
233 else:
234 tag = None
235 checkpoint_name, state_dict = model.load_checkpoint(
236 neox_args.load,
237 load_optimizer_states=load_optim_and_scheduler,
238 load_lr_scheduler_states=load_optim_and_scheduler,
239 tag=tag,
240 )
241
242 if checkpoint_name is None:
243 # if an iteration is specified, we want to raise an error here rather than
244 # continuing silently, since we are trying to load a specific checkpoint
245 if iteration is not None:
246 available_checkpoints = sorted(
247 [
248 int(i.name.replace("global_step", ""))
249 for i in Path(neox_args.load).glob("global_step*")
250 ]
251 )
252 raise ValueError(
253 f"Unable to load checkpoint for iteration {iteration}. \nAvailable iterations: {pformat(available_checkpoints)}"
254 )
255 if mpu.get_data_parallel_rank() == 0:
256 print("Unable to load checkpoint.")
257
258 return 0 # iteration 0, if not checkpoint loaded
259 else:
260 raise ValueError("Must be using deepspeed to use neox")
261
262 # Set iteration.
263 if neox_args.finetune:
264 iteration = 0
265 else:
266 iteration = state_dict.get("iteration") or state_dict.get(
267 "total_iters"
268 ) # total_iters backward compatible with older checkpoints
269 if iteration is None:
270 raise ValueError(
271 f"Unable to load iteration from checkpoint {checkpoint_name} with keys {state_dict.keys()}, exiting"
272 )
273
274 # Check arguments.
275 if "args" in state_dict:
276 checkpoint_args = state_dict["args"]
277 check_checkpoint_args(neox_args=neox_args, checkpoint_args=checkpoint_args)
278 print_rank_0(
279 " > validated currently set args with arguments in the checkpoint ..."
280 )
281 else:
282 print_rank_0(" > could not find arguments in the checkpoint for validation...")
283
284 # Check loaded checkpoint with forward pass
285 if neox_args.checkpoint_validation_with_forward_pass:
286 if "checkpoint_validation_logits" in state_dict:
287 check_forward_pass(
288 neox_args=neox_args,
289 model=model,
290 checkpoint_logits=state_dict["checkpoint_validation_logits"],
291 inference=inference,
292 )
293 print_rank_0(" > validated loaded checkpoint with forward pass ...")
294 else:
295 if mpu.get_data_parallel_rank() == 0:
296 print(
297 " > WARNING: checkpoint_validation_with_forward_pass is configured but no checkpoint validation data available in checkpoint {}".format(
298 checkpoint_name
299 )
300 )
301
302 # rng states.
303 if not neox_args.finetune and not neox_args.no_load_rng:
304 try:
305 random.setstate(state_dict["random_rng_state"])
306 np.random.set_state(state_dict["np_rng_state"])
307 torch.set_rng_state(state_dict["torch_rng_state"])
308 torch.cuda.set_rng_state(state_dict["cuda_rng_state"])
309 mpu.get_cuda_rng_tracker().set_states(state_dict["rng_tracker_states"])
310 except KeyError:
311 print_rank_0(
312 "Unable to load optimizer from checkpoint {}. "
313 "Specify --no-load-rng or --finetune to prevent "
314 "attempting to load the optimizer state, "
315 "exiting ...".format(checkpoint_name)
316 )
317 sys.exit()
318
319 torch.distributed.barrier()
320 if mpu.get_data_parallel_rank() == 0:
321 print(" successfully loaded {}".format(checkpoint_name))
322
323 return iteration
324
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/megatron/checkpointing.py b/megatron/checkpointing.py
--- a/megatron/checkpointing.py
+++ b/megatron/checkpointing.py
@@ -17,6 +17,7 @@
"""Input/output checkpointing."""
+import json
import os
import re
import shutil
@@ -198,7 +199,10 @@
os.makedirs(configs_directory, exist_ok=True)
for config_filename, config_data in neox_args.config_files.items():
with open(os.path.join(configs_directory, config_filename), "w") as f:
- f.write(config_data)
+ if isinstance(config_data, str):
+ f.write(config_data)
+ else:
+ json.dump(config_data, f)
def save_checkpoint(neox_args, iteration, model, optimizer, lr_scheduler):
| {"golden_diff": "diff --git a/megatron/checkpointing.py b/megatron/checkpointing.py\n--- a/megatron/checkpointing.py\n+++ b/megatron/checkpointing.py\n@@ -17,6 +17,7 @@\n \n \"\"\"Input/output checkpointing.\"\"\"\n \n+import json\n import os\n import re\n import shutil\n@@ -198,7 +199,10 @@\n os.makedirs(configs_directory, exist_ok=True)\n for config_filename, config_data in neox_args.config_files.items():\n with open(os.path.join(configs_directory, config_filename), \"w\") as f:\n- f.write(config_data)\n+ if isinstance(config_data, str):\n+ f.write(config_data)\n+ else:\n+ json.dump(config_data, f)\n \n \n def save_checkpoint(neox_args, iteration, model, optimizer, lr_scheduler):\n", "issue": "Checkpointing fails to save config data because it is a `dict` not a `str`\n**Describe the bug**\r\nSaving model checkpoints fails with stack trace:\r\n\r\n```\r\nTraceback (most recent call last):^M\r\n File \"/mnt/nvme/home/dashiell/gpt-neox/train.py\", line 27, in <module>^M\r\n pretrain(neox_args=neox_args)^M\r\n File \"/mnt/nvme/home/dashiell/gpt-neox/megatron/training.py\", line 106, in pretrain^M\r\n iteration = train(^M\r\n File \"/mnt/nvme/home/dashiell/gpt-neox/megatron/training.py\", line 613, in train^M\r\n save_checkpoint(^M\r\n File \"/mnt/nvme/home/dashiell/gpt-neox/megatron/checkpointing.py\", line 208, in save_checkpoint^M\r\n save_ds_checkpoint(iteration, model, neox_args)^M\r\n File \"/mnt/nvme/home/dashiell/gpt-neox/megatron/checkpointing.py\", line 201, in save_ds_checkpoint^M\r\n f.write(config_data)^M\r\nTypeError: write() argument must be str, not dict^M\r\n```\r\n\r\n**To Reproduce**\r\n`python3 deepy.py --conf_dir configs 1-3B.yml local_setup.yml`\r\n\r\n\r\n**Expected behavior**\r\nThe checkpoint should save without failing. \r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2021, EleutherAI\n# This file is based on code by the authors denoted below and has been modified from its original version.\n#\n# Copyright (c) 2020, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Input/output checkpointing.\"\"\"\n\nimport os\nimport re\nimport shutil\nimport random\nimport sys\nimport numpy as np\n\nimport torch\nfrom glob import glob\n\nfrom megatron import mpu\nfrom megatron import print_rank_0\nfrom megatron.utils import natural_sort\nfrom megatron.text_generation_utils import get_batch, forward_model\nfrom pathlib import Path\nfrom pprint import pformat\n\n\ndef check_checkpoint_args(neox_args, checkpoint_args):\n \"\"\"Ensure fixed arguments for a model are the same for the input\n arguments and the one retrieved from checkpoint.\"\"\"\n\n assert isinstance(checkpoint_args, dict), \"args stored in checkpoint is a dict\"\n for checkpoint_arg_name, checkpoint_arg_value in checkpoint_args.items():\n args_value = getattr(neox_args, checkpoint_arg_name)\n error_message = \"{} value from checkpoint ({}) is not equal to the currently set argument value ({}).\".format(\n checkpoint_arg_name, checkpoint_arg_value, args_value\n )\n assert checkpoint_arg_value == args_value, error_message\n\n\ndef do_forward_pass(neox_args, model, inference=False):\n\n # set to eval mode\n model_was_in_train = model.training\n model.eval()\n\n # get context tokens\n # always forward full batch size\n context_tokens_tensor = (\n torch.arange(neox_args.seq_length + 1)\n .repeat((neox_args.train_micro_batch_size_per_gpu, 1))\n .cuda()\n )\n\n # forward\n if inference:\n tokens, attention_mask, position_ids = get_batch(\n neox_args, context_tokens_tensor[:, : neox_args.seq_length]\n )\n model_inputs = (\n tokens,\n position_ids,\n attention_mask,\n torch.Tensor(),\n )\n logits, _ = forward_model(neox_args, model, model_inputs)\n elif neox_args.is_pipe_parallel:\n data_iterator = iter([{\"text\": context_tokens_tensor}])\n _, logits = model.eval_batch(data_iter=data_iterator, return_logits=True)\n else:\n tokens, attention_mask, position_ids = get_batch(\n neox_args, context_tokens_tensor[:, : neox_args.seq_length]\n )\n logits = model((tokens, position_ids, attention_mask))\n\n # reset to train mode, if model was in training before\n if model_was_in_train:\n model.train()\n\n if logits is not None:\n logits = logits.detach().cpu()[\n 0\n ] # just return first batch item (they are all equal)\n\n return logits\n\n\ndef check_forward_pass(neox_args, model, checkpoint_logits, inference):\n # do forward pass with loaded checkpoint\n logits = do_forward_pass(neox_args=neox_args, model=model, inference=inference)\n\n # check\n if (\n logits is not None and checkpoint_logits is not None\n ): # this could be the case for non-final pipeline stages\n if not (logits == checkpoint_logits).all().item():\n if mpu.get_data_parallel_rank() == 0:\n print(\n \" > WARNING: validate_checkpoint_forward() forward after load of checkpoint does not yield exactly same result\"\n )\n assert (\n torch.isclose(logits, checkpoint_logits).all().item()\n ), \"validate_checkpoint_forward() forward after load of checkpoint does not yield a close 
result\"\n\n\ndef ensure_directory_exists(filename):\n \"\"\"Build filename's path if it does not already exists.\"\"\"\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n\ndef get_checkpoint_name(checkpoints_path, iteration, release=False, mp_rank=None):\n \"\"\"A unified checkpoint name.\"\"\"\n if release:\n directory = \"release\"\n else:\n directory = \"iter_{:07d}\".format(iteration)\n return os.path.join(\n checkpoints_path,\n directory,\n \"mp_rank_{:02d}\".format(\n mpu.get_model_parallel_rank() if mp_rank is None else mp_rank\n ),\n \"model_optim_rng.pt\",\n )\n\n\ndef delete_old_checkpoints(save_dir, n_to_keep):\n if torch.distributed.get_rank() == 0:\n ckpt_dir_regex = r\"global_step[\\d]*\"\n if save_dir.endswith(\"/\"):\n save_dir = save_dir.strip(\"/\")\n all_ckpts = natural_sort(\n [\n i\n for i in glob(f\"{save_dir}/*\")\n if os.path.isdir(i) and re.search(ckpt_dir_regex, i)\n ]\n )\n n_to_delete = len(all_ckpts) - n_to_keep\n if n_to_delete > 0:\n to_delete = all_ckpts[:n_to_delete]\n print(f\"WARNING: Deleting old checkpoints: \\n\\t{', '.join(to_delete)}\")\n for ckpt in to_delete:\n try:\n shutil.rmtree(ckpt)\n except FileNotFoundError:\n pass\n\n\ndef save_ds_checkpoint(iteration, model, neox_args):\n \"\"\"Save a model checkpoint.\"\"\"\n sd = {\n \"iteration\": iteration,\n \"args\": {\n \"num_layers\": neox_args.num_layers,\n \"hidden_size\": neox_args.hidden_size,\n \"num_attention_heads\": neox_args.num_attention_heads,\n \"max_position_embeddings\": neox_args.max_position_embeddings,\n \"make_vocab_size_divisible_by\": neox_args.make_vocab_size_divisible_by,\n \"padded_vocab_size\": neox_args.padded_vocab_size,\n \"tokenizer_type\": neox_args.tokenizer_type,\n \"model_parallel_size\": neox_args.model_parallel_size,\n },\n }\n # rng states.\n if not neox_args.no_save_rng:\n sd[\"random_rng_state\"] = random.getstate()\n sd[\"np_rng_state\"] = np.random.get_state()\n sd[\"torch_rng_state\"] = torch.get_rng_state()\n sd[\"cuda_rng_state\"] = torch.cuda.get_rng_state()\n sd[\"rng_tracker_states\"] = mpu.get_cuda_rng_tracker().get_states()\n\n if neox_args.checkpoint_validation_with_forward_pass:\n logits = do_forward_pass(neox_args=neox_args, model=model)\n sd[\"checkpoint_validation_logits\"] = logits\n\n # checkpoint folder name\n tag = f\"global_step{iteration}\"\n\n # save checkpoint\n model.save_checkpoint(neox_args.save, tag=tag, client_state=sd)\n\n # save config files\n if torch.distributed.get_rank() == 0 and neox_args.config_files is not None:\n configs_directory = os.path.join(neox_args.save, tag, \"configs\")\n os.makedirs(configs_directory, exist_ok=True)\n for config_filename, config_data in neox_args.config_files.items():\n with open(os.path.join(configs_directory, config_filename), \"w\") as f:\n f.write(config_data)\n\n\ndef save_checkpoint(neox_args, iteration, model, optimizer, lr_scheduler):\n \"\"\"Save a model checkpoint.\"\"\"\n\n if neox_args.deepspeed:\n save_ds_checkpoint(iteration, model, neox_args)\n else:\n raise ValueError(\"Must be using deepspeed to use neox\")\n\n # Wait so everyone is done (necessary)\n torch.distributed.barrier()\n if neox_args.keep_last_n_checkpoints is not None:\n delete_old_checkpoints(neox_args.save, neox_args.keep_last_n_checkpoints)\n\n # Wait so everyone is done (not necessary)\n torch.distributed.barrier()\n\n\ndef load_checkpoint(\n neox_args, model, optimizer, lr_scheduler, inference=False, iteration=None\n):\n \"\"\"Load a model checkpoint and return the 
iteration.\"\"\"\n if neox_args.deepspeed:\n load_optim_and_scheduler = (\n not neox_args.no_load_optim\n ) # TODO: These should be configured by separate args\n if neox_args.finetune:\n load_optim_and_scheduler = False\n if iteration is not None:\n tag = f\"global_step{iteration}\"\n else:\n tag = None\n checkpoint_name, state_dict = model.load_checkpoint(\n neox_args.load,\n load_optimizer_states=load_optim_and_scheduler,\n load_lr_scheduler_states=load_optim_and_scheduler,\n tag=tag,\n )\n\n if checkpoint_name is None:\n # if an iteration is specified, we want to raise an error here rather than\n # continuing silently, since we are trying to load a specific checkpoint\n if iteration is not None:\n available_checkpoints = sorted(\n [\n int(i.name.replace(\"global_step\", \"\"))\n for i in Path(neox_args.load).glob(\"global_step*\")\n ]\n )\n raise ValueError(\n f\"Unable to load checkpoint for iteration {iteration}. \\nAvailable iterations: {pformat(available_checkpoints)}\"\n )\n if mpu.get_data_parallel_rank() == 0:\n print(\"Unable to load checkpoint.\")\n\n return 0 # iteration 0, if not checkpoint loaded\n else:\n raise ValueError(\"Must be using deepspeed to use neox\")\n\n # Set iteration.\n if neox_args.finetune:\n iteration = 0\n else:\n iteration = state_dict.get(\"iteration\") or state_dict.get(\n \"total_iters\"\n ) # total_iters backward compatible with older checkpoints\n if iteration is None:\n raise ValueError(\n f\"Unable to load iteration from checkpoint {checkpoint_name} with keys {state_dict.keys()}, exiting\"\n )\n\n # Check arguments.\n if \"args\" in state_dict:\n checkpoint_args = state_dict[\"args\"]\n check_checkpoint_args(neox_args=neox_args, checkpoint_args=checkpoint_args)\n print_rank_0(\n \" > validated currently set args with arguments in the checkpoint ...\"\n )\n else:\n print_rank_0(\" > could not find arguments in the checkpoint for validation...\")\n\n # Check loaded checkpoint with forward pass\n if neox_args.checkpoint_validation_with_forward_pass:\n if \"checkpoint_validation_logits\" in state_dict:\n check_forward_pass(\n neox_args=neox_args,\n model=model,\n checkpoint_logits=state_dict[\"checkpoint_validation_logits\"],\n inference=inference,\n )\n print_rank_0(\" > validated loaded checkpoint with forward pass ...\")\n else:\n if mpu.get_data_parallel_rank() == 0:\n print(\n \" > WARNING: checkpoint_validation_with_forward_pass is configured but no checkpoint validation data available in checkpoint {}\".format(\n checkpoint_name\n )\n )\n\n # rng states.\n if not neox_args.finetune and not neox_args.no_load_rng:\n try:\n random.setstate(state_dict[\"random_rng_state\"])\n np.random.set_state(state_dict[\"np_rng_state\"])\n torch.set_rng_state(state_dict[\"torch_rng_state\"])\n torch.cuda.set_rng_state(state_dict[\"cuda_rng_state\"])\n mpu.get_cuda_rng_tracker().set_states(state_dict[\"rng_tracker_states\"])\n except KeyError:\n print_rank_0(\n \"Unable to load optimizer from checkpoint {}. 
\"\n \"Specify --no-load-rng or --finetune to prevent \"\n \"attempting to load the optimizer state, \"\n \"exiting ...\".format(checkpoint_name)\n )\n sys.exit()\n\n torch.distributed.barrier()\n if mpu.get_data_parallel_rank() == 0:\n print(\" successfully loaded {}\".format(checkpoint_name))\n\n return iteration\n", "path": "megatron/checkpointing.py"}], "after_files": [{"content": "# Copyright (c) 2021, EleutherAI\n# This file is based on code by the authors denoted below and has been modified from its original version.\n#\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Input/output checkpointing.\"\"\"\n\nimport json\nimport os\nimport re\nimport shutil\nimport random\nimport sys\nimport numpy as np\n\nimport torch\nfrom glob import glob\n\nfrom megatron import mpu\nfrom megatron import print_rank_0\nfrom megatron.utils import natural_sort\nfrom megatron.text_generation_utils import get_batch, forward_model\nfrom pathlib import Path\nfrom pprint import pformat\n\n\ndef check_checkpoint_args(neox_args, checkpoint_args):\n \"\"\"Ensure fixed arguments for a model are the same for the input\n arguments and the one retrieved from checkpoint.\"\"\"\n\n assert isinstance(checkpoint_args, dict), \"args stored in checkpoint is a dict\"\n for checkpoint_arg_name, checkpoint_arg_value in checkpoint_args.items():\n args_value = getattr(neox_args, checkpoint_arg_name)\n error_message = \"{} value from checkpoint ({}) is not equal to the currently set argument value ({}).\".format(\n checkpoint_arg_name, checkpoint_arg_value, args_value\n )\n assert checkpoint_arg_value == args_value, error_message\n\n\ndef do_forward_pass(neox_args, model, inference=False):\n\n # set to eval mode\n model_was_in_train = model.training\n model.eval()\n\n # get context tokens\n # always forward full batch size\n context_tokens_tensor = (\n torch.arange(neox_args.seq_length + 1)\n .repeat((neox_args.train_micro_batch_size_per_gpu, 1))\n .cuda()\n )\n\n # forward\n if inference:\n tokens, attention_mask, position_ids = get_batch(\n neox_args, context_tokens_tensor[:, : neox_args.seq_length]\n )\n model_inputs = (\n tokens,\n position_ids,\n attention_mask,\n torch.Tensor(),\n )\n logits, _ = forward_model(neox_args, model, model_inputs)\n elif neox_args.is_pipe_parallel:\n data_iterator = iter([{\"text\": context_tokens_tensor}])\n _, logits = model.eval_batch(data_iter=data_iterator, return_logits=True)\n else:\n tokens, attention_mask, position_ids = get_batch(\n neox_args, context_tokens_tensor[:, : neox_args.seq_length]\n )\n logits = model((tokens, position_ids, attention_mask))\n\n # reset to train mode, if model was in training before\n if model_was_in_train:\n model.train()\n\n if logits is not None:\n logits = logits.detach().cpu()[\n 0\n ] # just return first batch item (they are all equal)\n\n return logits\n\n\ndef check_forward_pass(neox_args, model, checkpoint_logits, inference):\n # do forward pass with loaded checkpoint\n logits = 
do_forward_pass(neox_args=neox_args, model=model, inference=inference)\n\n # check\n if (\n logits is not None and checkpoint_logits is not None\n ): # this could be the case for non-final pipeline stages\n if not (logits == checkpoint_logits).all().item():\n if mpu.get_data_parallel_rank() == 0:\n print(\n \" > WARNING: validate_checkpoint_forward() forward after load of checkpoint does not yield exactly same result\"\n )\n assert (\n torch.isclose(logits, checkpoint_logits).all().item()\n ), \"validate_checkpoint_forward() forward after load of checkpoint does not yield a close result\"\n\n\ndef ensure_directory_exists(filename):\n \"\"\"Build filename's path if it does not already exists.\"\"\"\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n\ndef get_checkpoint_name(checkpoints_path, iteration, release=False, mp_rank=None):\n \"\"\"A unified checkpoint name.\"\"\"\n if release:\n directory = \"release\"\n else:\n directory = \"iter_{:07d}\".format(iteration)\n return os.path.join(\n checkpoints_path,\n directory,\n \"mp_rank_{:02d}\".format(\n mpu.get_model_parallel_rank() if mp_rank is None else mp_rank\n ),\n \"model_optim_rng.pt\",\n )\n\n\ndef delete_old_checkpoints(save_dir, n_to_keep):\n if torch.distributed.get_rank() == 0:\n ckpt_dir_regex = r\"global_step[\\d]*\"\n if save_dir.endswith(\"/\"):\n save_dir = save_dir.strip(\"/\")\n all_ckpts = natural_sort(\n [\n i\n for i in glob(f\"{save_dir}/*\")\n if os.path.isdir(i) and re.search(ckpt_dir_regex, i)\n ]\n )\n n_to_delete = len(all_ckpts) - n_to_keep\n if n_to_delete > 0:\n to_delete = all_ckpts[:n_to_delete]\n print(f\"WARNING: Deleting old checkpoints: \\n\\t{', '.join(to_delete)}\")\n for ckpt in to_delete:\n try:\n shutil.rmtree(ckpt)\n except FileNotFoundError:\n pass\n\n\ndef save_ds_checkpoint(iteration, model, neox_args):\n \"\"\"Save a model checkpoint.\"\"\"\n sd = {\n \"iteration\": iteration,\n \"args\": {\n \"num_layers\": neox_args.num_layers,\n \"hidden_size\": neox_args.hidden_size,\n \"num_attention_heads\": neox_args.num_attention_heads,\n \"max_position_embeddings\": neox_args.max_position_embeddings,\n \"make_vocab_size_divisible_by\": neox_args.make_vocab_size_divisible_by,\n \"padded_vocab_size\": neox_args.padded_vocab_size,\n \"tokenizer_type\": neox_args.tokenizer_type,\n \"model_parallel_size\": neox_args.model_parallel_size,\n },\n }\n # rng states.\n if not neox_args.no_save_rng:\n sd[\"random_rng_state\"] = random.getstate()\n sd[\"np_rng_state\"] = np.random.get_state()\n sd[\"torch_rng_state\"] = torch.get_rng_state()\n sd[\"cuda_rng_state\"] = torch.cuda.get_rng_state()\n sd[\"rng_tracker_states\"] = mpu.get_cuda_rng_tracker().get_states()\n\n if neox_args.checkpoint_validation_with_forward_pass:\n logits = do_forward_pass(neox_args=neox_args, model=model)\n sd[\"checkpoint_validation_logits\"] = logits\n\n # checkpoint folder name\n tag = f\"global_step{iteration}\"\n\n # save checkpoint\n model.save_checkpoint(neox_args.save, tag=tag, client_state=sd)\n\n # save config files\n if torch.distributed.get_rank() == 0 and neox_args.config_files is not None:\n configs_directory = os.path.join(neox_args.save, tag, \"configs\")\n os.makedirs(configs_directory, exist_ok=True)\n for config_filename, config_data in neox_args.config_files.items():\n with open(os.path.join(configs_directory, config_filename), \"w\") as f:\n if isinstance(config_data, str):\n f.write(config_data)\n else:\n json.dump(config_data, f)\n\n\ndef save_checkpoint(neox_args, 
iteration, model, optimizer, lr_scheduler):\n \"\"\"Save a model checkpoint.\"\"\"\n\n if neox_args.deepspeed:\n save_ds_checkpoint(iteration, model, neox_args)\n else:\n raise ValueError(\"Must be using deepspeed to use neox\")\n\n # Wait so everyone is done (necessary)\n torch.distributed.barrier()\n if neox_args.keep_last_n_checkpoints is not None:\n delete_old_checkpoints(neox_args.save, neox_args.keep_last_n_checkpoints)\n\n # Wait so everyone is done (not necessary)\n torch.distributed.barrier()\n\n\ndef load_checkpoint(\n neox_args, model, optimizer, lr_scheduler, inference=False, iteration=None\n):\n \"\"\"Load a model checkpoint and return the iteration.\"\"\"\n if neox_args.deepspeed:\n load_optim_and_scheduler = (\n not neox_args.no_load_optim\n ) # TODO: These should be configured by separate args\n if neox_args.finetune:\n load_optim_and_scheduler = False\n if iteration is not None:\n tag = f\"global_step{iteration}\"\n else:\n tag = None\n checkpoint_name, state_dict = model.load_checkpoint(\n neox_args.load,\n load_optimizer_states=load_optim_and_scheduler,\n load_lr_scheduler_states=load_optim_and_scheduler,\n tag=tag,\n )\n\n if checkpoint_name is None:\n # if an iteration is specified, we want to raise an error here rather than\n # continuing silently, since we are trying to load a specific checkpoint\n if iteration is not None:\n available_checkpoints = sorted(\n [\n int(i.name.replace(\"global_step\", \"\"))\n for i in Path(neox_args.load).glob(\"global_step*\")\n ]\n )\n raise ValueError(\n f\"Unable to load checkpoint for iteration {iteration}. \\nAvailable iterations: {pformat(available_checkpoints)}\"\n )\n if mpu.get_data_parallel_rank() == 0:\n print(\"Unable to load checkpoint.\")\n\n return 0 # iteration 0, if not checkpoint loaded\n else:\n raise ValueError(\"Must be using deepspeed to use neox\")\n\n # Set iteration.\n if neox_args.finetune:\n iteration = 0\n else:\n iteration = state_dict.get(\"iteration\") or state_dict.get(\n \"total_iters\"\n ) # total_iters backward compatible with older checkpoints\n if iteration is None:\n raise ValueError(\n f\"Unable to load iteration from checkpoint {checkpoint_name} with keys {state_dict.keys()}, exiting\"\n )\n\n # Check arguments.\n if \"args\" in state_dict:\n checkpoint_args = state_dict[\"args\"]\n check_checkpoint_args(neox_args=neox_args, checkpoint_args=checkpoint_args)\n print_rank_0(\n \" > validated currently set args with arguments in the checkpoint ...\"\n )\n else:\n print_rank_0(\" > could not find arguments in the checkpoint for validation...\")\n\n # Check loaded checkpoint with forward pass\n if neox_args.checkpoint_validation_with_forward_pass:\n if \"checkpoint_validation_logits\" in state_dict:\n check_forward_pass(\n neox_args=neox_args,\n model=model,\n checkpoint_logits=state_dict[\"checkpoint_validation_logits\"],\n inference=inference,\n )\n print_rank_0(\" > validated loaded checkpoint with forward pass ...\")\n else:\n if mpu.get_data_parallel_rank() == 0:\n print(\n \" > WARNING: checkpoint_validation_with_forward_pass is configured but no checkpoint validation data available in checkpoint {}\".format(\n checkpoint_name\n )\n )\n\n # rng states.\n if not neox_args.finetune and not neox_args.no_load_rng:\n try:\n random.setstate(state_dict[\"random_rng_state\"])\n np.random.set_state(state_dict[\"np_rng_state\"])\n torch.set_rng_state(state_dict[\"torch_rng_state\"])\n torch.cuda.set_rng_state(state_dict[\"cuda_rng_state\"])\n 
mpu.get_cuda_rng_tracker().set_states(state_dict[\"rng_tracker_states\"])\n except KeyError:\n print_rank_0(\n \"Unable to load optimizer from checkpoint {}. \"\n \"Specify --no-load-rng or --finetune to prevent \"\n \"attempting to load the optimizer state, \"\n \"exiting ...\".format(checkpoint_name)\n )\n sys.exit()\n\n torch.distributed.barrier()\n if mpu.get_data_parallel_rank() == 0:\n print(\" successfully loaded {}\".format(checkpoint_name))\n\n return iteration\n", "path": "megatron/checkpointing.py"}]} | 4,046 | 186 |
gh_patches_debug_8251 | rasdani/github-patches | git_diff | rootpy__rootpy-489 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
List index out of range
I'm in ipython, the latest released rootpy, and I get this when I try to access a TTree called `composed`.
``` ipython
In [9]: f.composed
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-9-b49340cac05a> in <module>()
----> 1 f.composed
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in __getattr__(self, attr)
89 in Get this can end up in an "infinite" recursion and stack overflow
90 """
---> 91 return self.Get(attr)
92
93 def __getitem__(self, name):
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in get(self, name, **kwargs)
52 thing = _dir.Get(path, **kwargs)
53 else:
---> 54 thing = f(self, _name, **kwargs)
55 if isinstance(thing, _DirectoryBase):
56 thing._parent = self
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in Get(self, name, **kwargs)
118 if not thing:
119 raise DoesNotExist
--> 120 return asrootpy(thing, **kwargs)
121
122 def GetRaw(self, name):
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in asrootpy(thing, **kwargs)
113
114 thing_cls = thing.__class__
--> 115 rootpy_cls = lookup(thing_cls)
116 if rootpy_cls is None:
117 log.warn("a subclass of %s is not implemented in rootpy" %
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in lookup(cls)
131
132 cls_name = cls.__name__
--> 133 return lookup_by_name(cls_name)
134
135
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in lookup_by_name(cls_name)
149 path, rootpy_cls_name = '.'.join(path_tokens[:-1]), path_tokens[-1]
150 rootpy_module = __import__(
--> 151 path, globals(), locals(), [rootpy_cls_name], -1)
152 rootpy_cls = getattr(rootpy_module, rootpy_cls_name)
153 if dynamic_kwargs is not None:
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/tree/__init__.py in <module>()
3 from .. import log; log = log[__name__]
4
----> 5 from .buffer import TreeBuffer
6 from .tree import Tree
7 from .model import TreeModel
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/tree/buffer.py in <module>()
10 from .. import create
11 from ..core import _resetable_mixin, _copy_construct_mixin
---> 12 from .. import stl
13
14
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/stl.py in <module>()
43 from .defaults import extra_initialization
44 from .util.cpp import CPPGrammar
---> 45 from . import compiled
46 from . import userdata
47 from . import lookup_by_name, register, QROOT
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/compiled/__init__.py in <module>()
16 import ROOT
17
---> 18 import rootpy.userdata as userdata
19
20 from .. import log; log = log[__name__]
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/userdata.py in <module>()
40 DATA_ROOT = CONFIG_ROOT = None
41 if (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and
---> 42 not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):
43 DATA_ROOT = ensure_directory('ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')
44 CONFIG_ROOT = ensure_directory('ROOTPY_CONFIG', '${XDG_CONFIG_HOME}/rootpy')
IndexError: list index out of range
In [10]: import sys
In [11]: sys.argv
Out[11]: []
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rootpy/userdata.py`
Content:
```
1 # Copyright 2012 the rootpy developers
2 # distributed under the terms of the GNU General Public License
3 """
4 This module handles creation of the user-data area
5 """
6 from __future__ import absolute_import
7
8 import os
9 import sys
10 import tempfile
11 import atexit
12 from os.path import expanduser, expandvars, exists, isdir, join as pjoin
13 from platform import machine
14
15 from . import log; log = log[__name__]
16 from . import QROOT
17 from .defaults import extra_initialization
18
19 __all__ = [
20 'DATA_ROOT',
21 'CONFIG_ROOT',
22 'BINARY_PATH',
23 'ARCH',
24 ]
25
26 if "XDG_CONFIG_HOME" not in os.environ:
27 os.environ["XDG_CONFIG_HOME"] = expanduser('~/.config')
28 if "XDG_CACHE_HOME" not in os.environ:
29 os.environ["XDG_CACHE_HOME"] = expanduser('~/.cache')
30
31
32 def ensure_directory(variable, default):
33 path = os.getenv(variable)
34 if path is None:
35 path = expandvars(default)
36 else:
37 path = expandvars(expanduser(path))
38
39 # check if expanduser failed:
40 if path.startswith('~'):
41 path = None
42 elif not exists(path):
43 os.makedirs(path)
44 elif not isdir(path):
45 # A file at path already exists
46 path = None
47 return path
48
49
50 DATA_ROOT = CONFIG_ROOT = None
51 if (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and
52 not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):
53 DATA_ROOT = ensure_directory(
54 'ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')
55 CONFIG_ROOT = ensure_directory(
56 'ROOTPY_CONFIG', '${XDG_CONFIG_HOME}/rootpy')
57
58 if DATA_ROOT is None:
59 log.info("Placing user data in /tmp.")
60 log.warning(
61 "Make sure '~/.cache/rootpy' or $ROOTPY_DATA is a writable "
62 "directory so that it isn't necessary to recreate all user "
63 "data each time")
64
65 DATA_ROOT = tempfile.mkdtemp()
66
67 @atexit.register
68 def __cleanup():
69 import shutil
70 shutil.rmtree(DATA_ROOT)
71
72 BINARY_PATH = None
73
74 ARCH = "{0}-{1}".format(machine(), QROOT.gROOT.GetVersionInt())
75 if BINARY_PATH is None:
76 BINARY_PATH = pjoin(DATA_ROOT, ARCH)
77
78
79 @extra_initialization
80 def show_binary_path():
81 log.debug("Using binary path: {0}".format(BINARY_PATH))
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rootpy/userdata.py b/rootpy/userdata.py
--- a/rootpy/userdata.py
+++ b/rootpy/userdata.py
@@ -48,8 +48,14 @@
DATA_ROOT = CONFIG_ROOT = None
-if (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and
- not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):
+
+in_nosetests = False
+if sys.argv and sys.argv[0].endswith('nosetests'):
+ in_nosetests = True
+
+grid_mode = os.getenv('ROOTPY_GRIDMODE') in ('1', 'true')
+
+if (os.getenv('DEBUG', None) or not (grid_mode or in_nosetests)):
DATA_ROOT = ensure_directory(
'ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')
CONFIG_ROOT = ensure_directory(
| {"golden_diff": "diff --git a/rootpy/userdata.py b/rootpy/userdata.py\n--- a/rootpy/userdata.py\n+++ b/rootpy/userdata.py\n@@ -48,8 +48,14 @@\n \n \n DATA_ROOT = CONFIG_ROOT = None\n-if (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and\n- not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):\n+\n+in_nosetests = False\n+if sys.argv and sys.argv[0].endswith('nosetests'):\n+ in_nosetests = True\n+\n+grid_mode = os.getenv('ROOTPY_GRIDMODE') in ('1', 'true')\n+\n+if (os.getenv('DEBUG', None) or not (grid_mode or in_nosetests)):\n DATA_ROOT = ensure_directory(\n 'ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')\n CONFIG_ROOT = ensure_directory(\n", "issue": "List index out of range\nI'm in ipython, the latest released rootpy, and I get this when I try to access a TTree called `composed`.\n\n``` ipython\nIn [9]: f.composed\n---------------------------------------------------------------------------\nIndexError Traceback (most recent call last)\n<ipython-input-9-b49340cac05a> in <module>()\n----> 1 f.composed\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in __getattr__(self, attr)\n 89 in Get this can end up in an \"infinite\" recursion and stack overflow\n 90 \"\"\"\n---> 91 return self.Get(attr)\n 92 \n 93 def __getitem__(self, name):\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in get(self, name, **kwargs)\n 52 thing = _dir.Get(path, **kwargs)\n 53 else:\n---> 54 thing = f(self, _name, **kwargs)\n 55 if isinstance(thing, _DirectoryBase):\n 56 thing._parent = self\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in Get(self, name, **kwargs)\n 118 if not thing:\n 119 raise DoesNotExist\n--> 120 return asrootpy(thing, **kwargs)\n 121 \n 122 def GetRaw(self, name):\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in asrootpy(thing, **kwargs)\n 113 \n 114 thing_cls = thing.__class__\n--> 115 rootpy_cls = lookup(thing_cls)\n 116 if rootpy_cls is None:\n 117 log.warn(\"a subclass of %s is not implemented in rootpy\" %\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in lookup(cls)\n 131 \n 132 cls_name = cls.__name__\n--> 133 return lookup_by_name(cls_name)\n 134 \n 135 \n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in lookup_by_name(cls_name)\n 149 path, rootpy_cls_name = '.'.join(path_tokens[:-1]), path_tokens[-1]\n 150 rootpy_module = __import__(\n--> 151 path, globals(), locals(), [rootpy_cls_name], -1)\n 152 rootpy_cls = getattr(rootpy_module, rootpy_cls_name)\n 153 if dynamic_kwargs is not None:\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/tree/__init__.py in <module>()\n 3 from .. import log; log = log[__name__]\n 4 \n----> 5 from .buffer import TreeBuffer\n 6 from .tree import Tree\n 7 from .model import TreeModel\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/tree/buffer.py in <module>()\n 10 from .. import create\n 11 from ..core import _resetable_mixin, _copy_construct_mixin\n---> 12 from .. import stl\n 13 \n 14 \n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/stl.py in <module>()\n 43 from .defaults import extra_initialization\n 44 from .util.cpp import CPPGrammar\n---> 45 from . import compiled\n 46 from . import userdata\n 47 from . import lookup_by_name, register, QROOT\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/compiled/__init__.py in <module>()\n 16 import ROOT\n 17 \n---> 18 import rootpy.userdata as userdata\n 19 \n 20 from .. 
import log; log = log[__name__]\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/userdata.py in <module>()\n 40 DATA_ROOT = CONFIG_ROOT = None\n 41 if (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and\n---> 42 not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):\n 43 DATA_ROOT = ensure_directory('ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')\n 44 CONFIG_ROOT = ensure_directory('ROOTPY_CONFIG', '${XDG_CONFIG_HOME}/rootpy')\n\nIndexError: list index out of range\n\nIn [10]: import sys\n\nIn [11]: sys.argv\nOut[11]: []\n```\n\n", "before_files": [{"content": "# Copyright 2012 the rootpy developers\n# distributed under the terms of the GNU General Public License\n\"\"\"\nThis module handles creation of the user-data area\n\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport tempfile\nimport atexit\nfrom os.path import expanduser, expandvars, exists, isdir, join as pjoin\nfrom platform import machine\n\nfrom . import log; log = log[__name__]\nfrom . import QROOT\nfrom .defaults import extra_initialization\n\n__all__ = [\n 'DATA_ROOT',\n 'CONFIG_ROOT',\n 'BINARY_PATH',\n 'ARCH',\n]\n\nif \"XDG_CONFIG_HOME\" not in os.environ:\n os.environ[\"XDG_CONFIG_HOME\"] = expanduser('~/.config')\nif \"XDG_CACHE_HOME\" not in os.environ:\n os.environ[\"XDG_CACHE_HOME\"] = expanduser('~/.cache')\n\n\ndef ensure_directory(variable, default):\n path = os.getenv(variable)\n if path is None:\n path = expandvars(default)\n else:\n path = expandvars(expanduser(path))\n\n # check if expanduser failed:\n if path.startswith('~'):\n path = None\n elif not exists(path):\n os.makedirs(path)\n elif not isdir(path):\n # A file at path already exists\n path = None\n return path\n\n\nDATA_ROOT = CONFIG_ROOT = None\nif (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and\n not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):\n DATA_ROOT = ensure_directory(\n 'ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')\n CONFIG_ROOT = ensure_directory(\n 'ROOTPY_CONFIG', '${XDG_CONFIG_HOME}/rootpy')\n\nif DATA_ROOT is None:\n log.info(\"Placing user data in /tmp.\")\n log.warning(\n \"Make sure '~/.cache/rootpy' or $ROOTPY_DATA is a writable \"\n \"directory so that it isn't necessary to recreate all user \"\n \"data each time\")\n\n DATA_ROOT = tempfile.mkdtemp()\n\n @atexit.register\n def __cleanup():\n import shutil\n shutil.rmtree(DATA_ROOT)\n\nBINARY_PATH = None\n\nARCH = \"{0}-{1}\".format(machine(), QROOT.gROOT.GetVersionInt())\nif BINARY_PATH is None:\n BINARY_PATH = pjoin(DATA_ROOT, ARCH)\n\n\n@extra_initialization\ndef show_binary_path():\n log.debug(\"Using binary path: {0}\".format(BINARY_PATH))\n", "path": "rootpy/userdata.py"}], "after_files": [{"content": "# Copyright 2012 the rootpy developers\n# distributed under the terms of the GNU General Public License\n\"\"\"\nThis module handles creation of the user-data area\n\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport tempfile\nimport atexit\nfrom os.path import expanduser, expandvars, exists, isdir, join as pjoin\nfrom platform import machine\n\nfrom . import log; log = log[__name__]\nfrom . 
import QROOT\nfrom .defaults import extra_initialization\n\n__all__ = [\n 'DATA_ROOT',\n 'CONFIG_ROOT',\n 'BINARY_PATH',\n 'ARCH',\n]\n\nif \"XDG_CONFIG_HOME\" not in os.environ:\n os.environ[\"XDG_CONFIG_HOME\"] = expanduser('~/.config')\nif \"XDG_CACHE_HOME\" not in os.environ:\n os.environ[\"XDG_CACHE_HOME\"] = expanduser('~/.cache')\n\n\ndef ensure_directory(variable, default):\n path = os.getenv(variable)\n if path is None:\n path = expandvars(default)\n else:\n path = expandvars(expanduser(path))\n\n # check if expanduser failed:\n if path.startswith('~'):\n path = None\n elif not exists(path):\n os.makedirs(path)\n elif not isdir(path):\n # A file at path already exists\n path = None\n return path\n\n\nDATA_ROOT = CONFIG_ROOT = None\n\nin_nosetests = False\nif sys.argv and sys.argv[0].endswith('nosetests'):\n in_nosetests = True\n\ngrid_mode = os.getenv('ROOTPY_GRIDMODE') in ('1', 'true')\n\nif (os.getenv('DEBUG', None) or not (grid_mode or in_nosetests)):\n DATA_ROOT = ensure_directory(\n 'ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')\n CONFIG_ROOT = ensure_directory(\n 'ROOTPY_CONFIG', '${XDG_CONFIG_HOME}/rootpy')\n\nif DATA_ROOT is None:\n log.info(\"Placing user data in /tmp.\")\n log.warning(\n \"Make sure '~/.cache/rootpy' or $ROOTPY_DATA is a writable \"\n \"directory so that it isn't necessary to recreate all user \"\n \"data each time\")\n\n DATA_ROOT = tempfile.mkdtemp()\n\n @atexit.register\n def __cleanup():\n import shutil\n shutil.rmtree(DATA_ROOT)\n\nBINARY_PATH = None\n\nARCH = \"{0}-{1}\".format(machine(), QROOT.gROOT.GetVersionInt())\nif BINARY_PATH is None:\n BINARY_PATH = pjoin(DATA_ROOT, ARCH)\n\n\n@extra_initialization\ndef show_binary_path():\n log.debug(\"Using binary path: {0}\".format(BINARY_PATH))\n", "path": "rootpy/userdata.py"}]} | 2,057 | 206 |
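A short illustrative note on the record above (gh_patches_debug_8251): the traceback bottoms out at `sys.argv[0]` because an embedded interpreter such as an IPython kernel can leave `sys.argv` empty, so the index lookup raises `IndexError` before the `endswith` check ever runs. The golden diff guards the lookup first. The snippet below is a minimal, self-contained sketch of that guard using only the standard library; the `in_nosetests` and `grid_mode` names mirror the patch, while the rest is illustrative and not part of rootpy itself.

```python
import os
import sys

# sys.argv can legitimately be empty (embedded interpreters such as IPython
# kernels), so check that it is non-empty before indexing into it.
in_nosetests = bool(sys.argv) and sys.argv[0].endswith("nosetests")

grid_mode = os.getenv("ROOTPY_GRIDMODE") in ("1", "true")

# Mirrors the decision in the patched rootpy/userdata.py: set up the user-data
# directories unless running on the grid or under nosetests, with DEBUG as an
# override.
should_create_user_dirs = bool(os.getenv("DEBUG")) or not (grid_mode or in_nosetests)

if __name__ == "__main__":
    print("sys.argv =", sys.argv)
    print("should_create_user_dirs =", should_create_user_dirs)
```

Running this inside a plain `python` session and inside an embedded interpreter with an empty `sys.argv` both succeed, which is exactly the property the original module lacked.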
gh_patches_debug_7504 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4908 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3
4 from types import MethodType
5 from typing import Optional, Tuple
6
7 import torch
8 import torch.nn.functional as F
9 from transformers.models.llama.modeling_llama import (
10 LlamaRMSNorm,
11 LlamaAttention,
12 LlamaModel,
13 LlamaForCausalLM,
14 apply_rotary_pos_emb,
15 repeat_kv,
16 )
17
18 from colossalai.logging import get_dist_logger
19 from einops import rearrange
20
21 from flash_attn.bert_padding import pad_input, unpad_input
22 from flash_attn.flash_attn_interface import (
23 flash_attn_func,
24 flash_attn_varlen_kvpacked_func,
25 )
26 from flash_attn.ops.rms_norm import rms_norm
27
28
29 logger = get_dist_logger()
30
31
32 def _prepare_decoder_attention_mask(
33 self: LlamaModel,
34 attention_mask: torch.BoolTensor,
35 input_shape: torch.Size,
36 inputs_embeds: torch.Tensor,
37 past_key_values_length: int,
38 ) -> Optional[torch.Tensor]:
39 """
40 Decoder attetion mask
41 """
42 if past_key_values_length > 0 and attention_mask is not None:
43 attention_mask = torch.cat(
44 tensors=(
45 torch.full(
46 size=(input_shape[0], past_key_values_length),
47 fill_value=True,
48 dtype=attention_mask.dtype,
49 device=attention_mask.device,
50 ),
51 attention_mask,
52 ),
53 dim=-1,
54 ) # (bsz, past_key_values_length + q_len)
55 if attention_mask is not None and torch.all(attention_mask):
56 return None # Faster
57 return attention_mask
58
59
60 def attention_forward(
61 self: LlamaAttention,
62 hidden_states: torch.Tensor,
63 attention_mask: Optional[torch.Tensor] = None,
64 position_ids: Optional[torch.LongTensor] = None,
65 past_key_value: Optional[Tuple[torch.Tensor]] = None,
66 output_attentions: bool = False,
67 use_cache: bool = False,
68 ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
69 """
70 Re-define LLaMA-2 `LlamaAttention` forward method using flash-attention.
71 """
72 if output_attentions:
73 logger.warning(
74 "Argument `output_attentions` is not supported for flash-attention patched `LlamaAttention`, "
75 "return `None` instead."
76 )
77
78 bsz, q_len, _ = hidden_states.size()
79
80 if self.config.pretraining_tp > 1:
81 q_slicing, kv_slicing = (
82 dim // self.config.pretraining_tp
83 for dim in (
84 self.num_heads * self.head_dim,
85 self.num_key_value_heads * self.head_dim,
86 )
87 ) # `Tuple[int, int]`
88 q_slices, k_slices, v_slices = (
89 proj.weight.split(slicing, dim=0)
90 for proj, slicing in (
91 (self.q_proj, q_slicing),
92 (self.k_proj, kv_slicing),
93 (self.v_proj, kv_slicing),
94 )
95 ) # Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor], Tuple[torch.Tensor]]
96 q, k, v = (
97 torch.cat(
98 [F.linear(hidden_states, slices[i]) for i in range(self.config.pretraining_tp)],
99 dim=-1,
100 )
101 for slices in (q_slices, k_slices, v_slices)
102 )
103 # `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]` of shape:
104 # (bsz, q_len, num_heads * head_dim),
105 # (bsz, q_len, num_key_value_heads * head_dim),
106 # (bsz, q_len, num_key_value_heads * head_dim)
107 else:
108 q, k, v = (proj(hidden_states) for proj in (self.q_proj, self.k_proj, self.v_proj))
109 # `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]` of shape:
110 # (bsz, q_len, num_heads * head_dim),
111 # (bsz, q_len, num_key_value_heads * head_dim),
112 # (bsz, q_len, num_key_value_heads * head_dim)
113
114 # (bsz, q_len, num_heads * head_dim) -> (bsz, num_heads, q_len, head_dim);
115 # (bsz, q_len, num_key_value_heads * head_dim) -> (bsz, num_key_value_heads, q_len, head_dim);
116 # (bsz, q_len, num_key_value_heads * head_dim) -> (bsz, num_key_value_heads, q_len, head_dim)
117 q, k, v = (
118 states.view(bsz, q_len, num_heads, self.head_dim).transpose(1, 2)
119 for states, num_heads in (
120 (q, self.num_heads),
121 (k, self.num_key_value_heads),
122 (v, self.num_key_value_heads),
123 )
124 )
125 kv_len = k.shape[-2] # initially, `kv_len` == `q_len`
126 past_kv_len = 0
127 if past_key_value is not None:
128 # if `past_key_value` is not None, `kv_len` > `q_len`.
129 past_kv_len = past_key_value[0].shape[-2]
130 kv_len += past_kv_len
131
132 # two `torch.Tensor` objs of shape (1, 1, kv_len, head_dim)
133 cos, sin = self.rotary_emb(v, seq_len=kv_len)
134 # (bsz, num_heads, q_len, head_dim), (bsz, num_key_value_heads, q_len, head_dim)
135 q, k = apply_rotary_pos_emb(q=q, k=k, cos=cos, sin=sin, position_ids=position_ids)
136 if past_key_value is not None:
137 # reuse k, v, self_attention
138 k = torch.cat([past_key_value[0], k], dim=2)
139 v = torch.cat([past_key_value[1], v], dim=2)
140
141 past_key_value = (k, v) if use_cache else None
142
143 # repeat k/v heads if n_kv_heads < n_heads
144 k = repeat_kv(hidden_states=k, n_rep=self.num_key_value_groups)
145 # (bsz, num_key_value_heads, q_len, head_dim) -> (bsz, num_heads, q_len, head_dim)
146 v = repeat_kv(hidden_states=v, n_rep=self.num_key_value_groups)
147 # (bsz, num_key_value_heads, q_len, head_dim) -> (bsz, num_heads, q_len, head_dim)
148
149 key_padding_mask = attention_mask
150 # (bsz, num_heads, q_len, head_dim) -> (bsz, q_len, num_heads, head_dim)
151 q, k, v = (states.transpose(1, 2) for states in (q, k, v))
152
153 if past_kv_len > 0:
154 q = torch.cat(
155 tensors=(
156 torch.full(
157 size=(bsz, past_kv_len, self.num_heads, self.head_dim),
158 fill_value=0.0,
159 dtype=q.dtype,
160 device=q.device,
161 ),
162 q,
163 ),
164 dim=1,
165 ) # (bsz, past_kv_len + q_len, num_heads, head_dim)
166
167 if key_padding_mask is None:
168 # (bsz, past_kv_len + q_len, num_heads, head_dim)
169 output = flash_attn_func(q=q, k=k, v=v, dropout_p=0.0, softmax_scale=None, causal=True) # (bsz, )
170 output = rearrange(output, pattern="... h d -> ... (h d)") # (bsz, past_kv_len + q_len, num_heads * head_dim)
171 else:
172 q, indices, cu_q_lens, max_q_len = unpad_input(hidden_states=q, attention_mask=key_padding_mask)
173 kv, _, cu_kv_lens, max_kv_len = unpad_input(
174 hidden_states=torch.stack(tensors=(k, v), dim=2),
175 attention_mask=key_padding_mask,
176 )
177 output_unpad = flash_attn_varlen_kvpacked_func(
178 q=q,
179 kv=kv,
180 cu_seqlens_q=cu_q_lens,
181 cu_seqlens_k=cu_kv_lens,
182 max_seqlen_q=max_q_len,
183 max_seqlen_k=max_kv_len,
184 dropout_p=0.0,
185 softmax_scale=None,
186 causal=True,
187 )
188 output = pad_input(
189 hidden_states=rearrange(output_unpad, pattern="nnz h d -> nnz (h d)"),
190 indices=indices,
191 batch=bsz,
192 seqlen=past_kv_len + q_len,
193 ) # (bsz, past_kv_len + q_len, num_heads * head_dim)
194
195 if past_kv_len > 0:
196 # Strip off the zero query outputs.
197 output = output[:, past_kv_len:, ...] # (bsz, q_len, num_heads * head_dim)
198 output = self.o_proj(output) # (bsz, q_len, hidden_size)
199 return output, None, past_key_value
200
201
202 def rms_norm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor) -> torch.Tensor:
203 """
204 Formard function for RMS Norm
205 """
206 return rms_norm(x=hidden_states, weight=self.weight, epsilon=self.variance_epsilon)
207
208
209 def replace_with_flash_attention(model: LlamaForCausalLM) -> None:
210 for name, module in model.named_modules():
211 if isinstance(module, LlamaAttention):
212 module.forward = MethodType(attention_forward, module)
213 if isinstance(module, LlamaModel):
214 module._prepare_decoder_attention_mask = MethodType(_prepare_decoder_attention_mask, module)
215 if isinstance(module, LlamaRMSNorm):
216 module.forward = MethodType(rms_norm_forward, module)
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py b/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py
--- a/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py
+++ b/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py
@@ -65,6 +65,7 @@
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
+ **kwargs
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""
Re-define LLaMA-2 `LlamaAttention` forward method using flash-attention.
| {"golden_diff": "diff --git a/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py b/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py\n--- a/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py\n+++ b/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py\n@@ -65,6 +65,7 @@\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n+ **kwargs\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"\n Re-define LLaMA-2 `LlamaAttention` forward method using flash-attention.\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom types import MethodType\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom transformers.models.llama.modeling_llama import (\n LlamaRMSNorm,\n LlamaAttention,\n LlamaModel,\n LlamaForCausalLM,\n apply_rotary_pos_emb,\n repeat_kv,\n)\n\nfrom colossalai.logging import get_dist_logger\nfrom einops import rearrange\n\nfrom flash_attn.bert_padding import pad_input, unpad_input\nfrom flash_attn.flash_attn_interface import (\n flash_attn_func,\n flash_attn_varlen_kvpacked_func,\n)\nfrom flash_attn.ops.rms_norm import rms_norm\n\n\nlogger = get_dist_logger()\n\n\ndef _prepare_decoder_attention_mask(\n self: LlamaModel,\n attention_mask: torch.BoolTensor,\n input_shape: torch.Size,\n inputs_embeds: torch.Tensor,\n past_key_values_length: int,\n) -> Optional[torch.Tensor]:\n \"\"\"\n Decoder attetion mask\n \"\"\"\n if past_key_values_length > 0 and attention_mask is not None:\n attention_mask = torch.cat(\n tensors=(\n torch.full(\n size=(input_shape[0], past_key_values_length),\n fill_value=True,\n dtype=attention_mask.dtype,\n device=attention_mask.device,\n ),\n attention_mask,\n ),\n dim=-1,\n ) # (bsz, past_key_values_length + q_len)\n if attention_mask is not None and torch.all(attention_mask):\n return None # Faster\n return attention_mask\n\n\ndef attention_forward(\n self: LlamaAttention,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"\n Re-define LLaMA-2 `LlamaAttention` forward method using flash-attention.\n \"\"\"\n if output_attentions:\n logger.warning(\n \"Argument `output_attentions` is not supported for flash-attention patched `LlamaAttention`, \"\n \"return `None` instead.\"\n )\n\n bsz, q_len, _ = hidden_states.size()\n\n if self.config.pretraining_tp > 1:\n q_slicing, kv_slicing = (\n dim // self.config.pretraining_tp\n for dim in (\n self.num_heads * self.head_dim,\n self.num_key_value_heads * self.head_dim,\n )\n ) # `Tuple[int, int]`\n q_slices, k_slices, v_slices = (\n proj.weight.split(slicing, dim=0)\n for proj, slicing in (\n (self.q_proj, q_slicing),\n (self.k_proj, kv_slicing),\n (self.v_proj, kv_slicing),\n )\n ) # Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor], Tuple[torch.Tensor]]\n q, k, v = (\n torch.cat(\n [F.linear(hidden_states, slices[i]) for i in range(self.config.pretraining_tp)],\n dim=-1,\n )\n for slices in (q_slices, k_slices, v_slices)\n )\n # `Tuple[torch.Tensor, 
torch.Tensor, torch.Tensor]` of shape:\n # (bsz, q_len, num_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim)\n else:\n q, k, v = (proj(hidden_states) for proj in (self.q_proj, self.k_proj, self.v_proj))\n # `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]` of shape:\n # (bsz, q_len, num_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim)\n\n # (bsz, q_len, num_heads * head_dim) -> (bsz, num_heads, q_len, head_dim);\n # (bsz, q_len, num_key_value_heads * head_dim) -> (bsz, num_key_value_heads, q_len, head_dim);\n # (bsz, q_len, num_key_value_heads * head_dim) -> (bsz, num_key_value_heads, q_len, head_dim)\n q, k, v = (\n states.view(bsz, q_len, num_heads, self.head_dim).transpose(1, 2)\n for states, num_heads in (\n (q, self.num_heads),\n (k, self.num_key_value_heads),\n (v, self.num_key_value_heads),\n )\n )\n kv_len = k.shape[-2] # initially, `kv_len` == `q_len`\n past_kv_len = 0\n if past_key_value is not None:\n # if `past_key_value` is not None, `kv_len` > `q_len`.\n past_kv_len = past_key_value[0].shape[-2]\n kv_len += past_kv_len\n\n # two `torch.Tensor` objs of shape (1, 1, kv_len, head_dim)\n cos, sin = self.rotary_emb(v, seq_len=kv_len)\n # (bsz, num_heads, q_len, head_dim), (bsz, num_key_value_heads, q_len, head_dim)\n q, k = apply_rotary_pos_emb(q=q, k=k, cos=cos, sin=sin, position_ids=position_ids)\n if past_key_value is not None:\n # reuse k, v, self_attention\n k = torch.cat([past_key_value[0], k], dim=2)\n v = torch.cat([past_key_value[1], v], dim=2)\n\n past_key_value = (k, v) if use_cache else None\n\n # repeat k/v heads if n_kv_heads < n_heads\n k = repeat_kv(hidden_states=k, n_rep=self.num_key_value_groups)\n # (bsz, num_key_value_heads, q_len, head_dim) -> (bsz, num_heads, q_len, head_dim)\n v = repeat_kv(hidden_states=v, n_rep=self.num_key_value_groups)\n # (bsz, num_key_value_heads, q_len, head_dim) -> (bsz, num_heads, q_len, head_dim)\n\n key_padding_mask = attention_mask\n # (bsz, num_heads, q_len, head_dim) -> (bsz, q_len, num_heads, head_dim)\n q, k, v = (states.transpose(1, 2) for states in (q, k, v))\n\n if past_kv_len > 0:\n q = torch.cat(\n tensors=(\n torch.full(\n size=(bsz, past_kv_len, self.num_heads, self.head_dim),\n fill_value=0.0,\n dtype=q.dtype,\n device=q.device,\n ),\n q,\n ),\n dim=1,\n ) # (bsz, past_kv_len + q_len, num_heads, head_dim)\n\n if key_padding_mask is None:\n # (bsz, past_kv_len + q_len, num_heads, head_dim)\n output = flash_attn_func(q=q, k=k, v=v, dropout_p=0.0, softmax_scale=None, causal=True) # (bsz, )\n output = rearrange(output, pattern=\"... h d -> ... (h d)\") # (bsz, past_kv_len + q_len, num_heads * head_dim)\n else:\n q, indices, cu_q_lens, max_q_len = unpad_input(hidden_states=q, attention_mask=key_padding_mask)\n kv, _, cu_kv_lens, max_kv_len = unpad_input(\n hidden_states=torch.stack(tensors=(k, v), dim=2),\n attention_mask=key_padding_mask,\n )\n output_unpad = flash_attn_varlen_kvpacked_func(\n q=q,\n kv=kv,\n cu_seqlens_q=cu_q_lens,\n cu_seqlens_k=cu_kv_lens,\n max_seqlen_q=max_q_len,\n max_seqlen_k=max_kv_len,\n dropout_p=0.0,\n softmax_scale=None,\n causal=True,\n )\n output = pad_input(\n hidden_states=rearrange(output_unpad, pattern=\"nnz h d -> nnz (h d)\"),\n indices=indices,\n batch=bsz,\n seqlen=past_kv_len + q_len,\n ) # (bsz, past_kv_len + q_len, num_heads * head_dim)\n\n if past_kv_len > 0:\n # Strip off the zero query outputs.\n output = output[:, past_kv_len:, ...] 
# (bsz, q_len, num_heads * head_dim)\n output = self.o_proj(output) # (bsz, q_len, hidden_size)\n return output, None, past_key_value\n\n\ndef rms_norm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Formard function for RMS Norm\n \"\"\"\n return rms_norm(x=hidden_states, weight=self.weight, epsilon=self.variance_epsilon)\n\n\ndef replace_with_flash_attention(model: LlamaForCausalLM) -> None:\n for name, module in model.named_modules():\n if isinstance(module, LlamaAttention):\n module.forward = MethodType(attention_forward, module)\n if isinstance(module, LlamaModel):\n module._prepare_decoder_attention_mask = MethodType(_prepare_decoder_attention_mask, module)\n if isinstance(module, LlamaRMSNorm):\n module.forward = MethodType(rms_norm_forward, module)\n", "path": "applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom types import MethodType\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom transformers.models.llama.modeling_llama import (\n LlamaRMSNorm,\n LlamaAttention,\n LlamaModel,\n LlamaForCausalLM,\n apply_rotary_pos_emb,\n repeat_kv,\n)\n\nfrom colossalai.logging import get_dist_logger\nfrom einops import rearrange\n\nfrom flash_attn.bert_padding import pad_input, unpad_input\nfrom flash_attn.flash_attn_interface import (\n flash_attn_func,\n flash_attn_varlen_kvpacked_func,\n)\nfrom flash_attn.ops.rms_norm import rms_norm\n\n\nlogger = get_dist_logger()\n\n\ndef _prepare_decoder_attention_mask(\n self: LlamaModel,\n attention_mask: torch.BoolTensor,\n input_shape: torch.Size,\n inputs_embeds: torch.Tensor,\n past_key_values_length: int,\n) -> Optional[torch.Tensor]:\n \"\"\"\n Decoder attetion mask\n \"\"\"\n if past_key_values_length > 0 and attention_mask is not None:\n attention_mask = torch.cat(\n tensors=(\n torch.full(\n size=(input_shape[0], past_key_values_length),\n fill_value=True,\n dtype=attention_mask.dtype,\n device=attention_mask.device,\n ),\n attention_mask,\n ),\n dim=-1,\n ) # (bsz, past_key_values_length + q_len)\n if attention_mask is not None and torch.all(attention_mask):\n return None # Faster\n return attention_mask\n\n\ndef attention_forward(\n self: LlamaAttention,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n **kwargs\n) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"\n Re-define LLaMA-2 `LlamaAttention` forward method using flash-attention.\n \"\"\"\n if output_attentions:\n logger.warning(\n \"Argument `output_attentions` is not supported for flash-attention patched `LlamaAttention`, \"\n \"return `None` instead.\"\n )\n\n bsz, q_len, _ = hidden_states.size()\n\n if self.config.pretraining_tp > 1:\n q_slicing, kv_slicing = (\n dim // self.config.pretraining_tp\n for dim in (\n self.num_heads * self.head_dim,\n self.num_key_value_heads * self.head_dim,\n )\n ) # `Tuple[int, int]`\n q_slices, k_slices, v_slices = (\n proj.weight.split(slicing, dim=0)\n for proj, slicing in (\n (self.q_proj, q_slicing),\n (self.k_proj, kv_slicing),\n (self.v_proj, kv_slicing),\n )\n ) # Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor], Tuple[torch.Tensor]]\n q, k, v = (\n torch.cat(\n [F.linear(hidden_states, slices[i]) for i in 
range(self.config.pretraining_tp)],\n dim=-1,\n )\n for slices in (q_slices, k_slices, v_slices)\n )\n # `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]` of shape:\n # (bsz, q_len, num_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim)\n else:\n q, k, v = (proj(hidden_states) for proj in (self.q_proj, self.k_proj, self.v_proj))\n # `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]` of shape:\n # (bsz, q_len, num_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim)\n\n # (bsz, q_len, num_heads * head_dim) -> (bsz, num_heads, q_len, head_dim);\n # (bsz, q_len, num_key_value_heads * head_dim) -> (bsz, num_key_value_heads, q_len, head_dim);\n # (bsz, q_len, num_key_value_heads * head_dim) -> (bsz, num_key_value_heads, q_len, head_dim)\n q, k, v = (\n states.view(bsz, q_len, num_heads, self.head_dim).transpose(1, 2)\n for states, num_heads in (\n (q, self.num_heads),\n (k, self.num_key_value_heads),\n (v, self.num_key_value_heads),\n )\n )\n kv_len = k.shape[-2] # initially, `kv_len` == `q_len`\n past_kv_len = 0\n if past_key_value is not None:\n # if `past_key_value` is not None, `kv_len` > `q_len`.\n past_kv_len = past_key_value[0].shape[-2]\n kv_len += past_kv_len\n\n # two `torch.Tensor` objs of shape (1, 1, kv_len, head_dim)\n cos, sin = self.rotary_emb(v, seq_len=kv_len)\n # (bsz, num_heads, q_len, head_dim), (bsz, num_key_value_heads, q_len, head_dim)\n q, k = apply_rotary_pos_emb(q=q, k=k, cos=cos, sin=sin, position_ids=position_ids)\n if past_key_value is not None:\n # reuse k, v, self_attention\n k = torch.cat([past_key_value[0], k], dim=2)\n v = torch.cat([past_key_value[1], v], dim=2)\n\n past_key_value = (k, v) if use_cache else None\n\n # repeat k/v heads if n_kv_heads < n_heads\n k = repeat_kv(hidden_states=k, n_rep=self.num_key_value_groups)\n # (bsz, num_key_value_heads, q_len, head_dim) -> (bsz, num_heads, q_len, head_dim)\n v = repeat_kv(hidden_states=v, n_rep=self.num_key_value_groups)\n # (bsz, num_key_value_heads, q_len, head_dim) -> (bsz, num_heads, q_len, head_dim)\n\n key_padding_mask = attention_mask\n # (bsz, num_heads, q_len, head_dim) -> (bsz, q_len, num_heads, head_dim)\n q, k, v = (states.transpose(1, 2) for states in (q, k, v))\n\n if past_kv_len > 0:\n q = torch.cat(\n tensors=(\n torch.full(\n size=(bsz, past_kv_len, self.num_heads, self.head_dim),\n fill_value=0.0,\n dtype=q.dtype,\n device=q.device,\n ),\n q,\n ),\n dim=1,\n ) # (bsz, past_kv_len + q_len, num_heads, head_dim)\n\n if key_padding_mask is None:\n # (bsz, past_kv_len + q_len, num_heads, head_dim)\n output = flash_attn_func(q=q, k=k, v=v, dropout_p=0.0, softmax_scale=None, causal=True) # (bsz, )\n output = rearrange(output, pattern=\"... h d -> ... 
(h d)\") # (bsz, past_kv_len + q_len, num_heads * head_dim)\n else:\n q, indices, cu_q_lens, max_q_len = unpad_input(hidden_states=q, attention_mask=key_padding_mask)\n kv, _, cu_kv_lens, max_kv_len = unpad_input(\n hidden_states=torch.stack(tensors=(k, v), dim=2),\n attention_mask=key_padding_mask,\n )\n output_unpad = flash_attn_varlen_kvpacked_func(\n q=q,\n kv=kv,\n cu_seqlens_q=cu_q_lens,\n cu_seqlens_k=cu_kv_lens,\n max_seqlen_q=max_q_len,\n max_seqlen_k=max_kv_len,\n dropout_p=0.0,\n softmax_scale=None,\n causal=True,\n )\n output = pad_input(\n hidden_states=rearrange(output_unpad, pattern=\"nnz h d -> nnz (h d)\"),\n indices=indices,\n batch=bsz,\n seqlen=past_kv_len + q_len,\n ) # (bsz, past_kv_len + q_len, num_heads * head_dim)\n\n if past_kv_len > 0:\n # Strip off the zero query outputs.\n output = output[:, past_kv_len:, ...] # (bsz, q_len, num_heads * head_dim)\n output = self.o_proj(output) # (bsz, q_len, hidden_size)\n return output, None, past_key_value\n\n\ndef rms_norm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Formard function for RMS Norm\n \"\"\"\n return rms_norm(x=hidden_states, weight=self.weight, epsilon=self.variance_epsilon)\n\n\ndef replace_with_flash_attention(model: LlamaForCausalLM) -> None:\n for name, module in model.named_modules():\n if isinstance(module, LlamaAttention):\n module.forward = MethodType(attention_forward, module)\n if isinstance(module, LlamaModel):\n module._prepare_decoder_attention_mask = MethodType(_prepare_decoder_attention_mask, module)\n if isinstance(module, LlamaRMSNorm):\n module.forward = MethodType(rms_norm_forward, module)\n", "path": "applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py"}]} | 2,993 | 196 |
gh_patches_debug_7373 | rasdani/github-patches | git_diff | wagtail__wagtail-1274 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AdminAutoHeightTextInput doesn't auto-height on page load
`wagtailadmin.widgets.AdminAutoHeightTextInput` appears now to be the new default widget for `django.db.models.TextField`. It auto-heights when you type into it

…but on reloading the page, the auto-heighting appears not to occur. This is four rows of text after reloading the page:

Three rows, with the exact same widget height:

But two rows with a reduced widget height:

@davecranwell suggested an off-by-one error, which seems reasonable, given the slightly different heights above.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailadmin/widgets.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 import json
4
5 from django.core.urlresolvers import reverse
6 from django.forms import widgets
7 from django.contrib.contenttypes.models import ContentType
8 from django.utils.translation import ugettext_lazy as _
9 from django.template.loader import render_to_string
10
11 from wagtail.utils.widgets import WidgetWithScript
12 from wagtail.wagtailcore.models import Page
13
14 from taggit.forms import TagWidget
15
16
17 class AdminAutoHeightTextInput(WidgetWithScript, widgets.Textarea):
18 def __init__(self, attrs=None):
19 # Use more appropriate rows default, given autoheight will alter this anyway
20 default_attrs = {'rows': '1'}
21 if attrs:
22 default_attrs.update(attrs)
23
24 super(AdminAutoHeightTextInput, self).__init__(default_attrs)
25
26 def render_js_init(self, id_, name, value):
27 return '$("#{0}").autosize();'.format(id_)
28
29 class AdminDateInput(WidgetWithScript, widgets.DateInput):
30 # Set a default date format to match the one that our JS date picker expects -
31 # it can still be overridden explicitly, but this way it won't be affected by
32 # the DATE_INPUT_FORMATS setting
33 def __init__(self, attrs=None, format='%Y-%m-%d'):
34 super(AdminDateInput, self).__init__(attrs=attrs, format=format)
35
36 def render_js_init(self, id_, name, value):
37 return 'initDateChooser({0});'.format(json.dumps(id_))
38
39
40 class AdminTimeInput(WidgetWithScript, widgets.TimeInput):
41 def __init__(self, attrs=None, format='%H:%M'):
42 super(AdminTimeInput, self).__init__(attrs=attrs, format=format)
43
44 def render_js_init(self, id_, name, value):
45 return 'initTimeChooser({0});'.format(json.dumps(id_))
46
47
48 class AdminDateTimeInput(WidgetWithScript, widgets.DateTimeInput):
49 def __init__(self, attrs=None, format='%Y-%m-%d %H:%M'):
50 super(AdminDateTimeInput, self).__init__(attrs=attrs, format=format)
51
52 def render_js_init(self, id_, name, value):
53 return 'initDateTimeChooser({0});'.format(json.dumps(id_))
54
55
56 class AdminTagWidget(WidgetWithScript, TagWidget):
57 def render_js_init(self, id_, name, value):
58 return "initTagField({0}, {1});".format(
59 json.dumps(id_),
60 json.dumps(reverse('wagtailadmin_tag_autocomplete')))
61
62
63 class AdminChooser(WidgetWithScript, widgets.Input):
64 input_type = 'hidden'
65 choose_one_text = _("Choose an item")
66 choose_another_text = _("Choose another item")
67 clear_choice_text = _("Clear choice")
68 link_to_chosen_text = _("Edit this item")
69
70 def get_instance(self, model_class, value):
71 # helper method for cleanly turning 'value' into an instance object
72 if value is None:
73 return None
74
75 try:
76 return model_class.objects.get(pk=value)
77 except model_class.DoesNotExist:
78 return None
79
80 def get_instance_and_id(self, model_class, value):
81 if value is None:
82 return (None, None)
83 elif isinstance(value, model_class):
84 return (value, value.pk)
85 else:
86 try:
87 return (model_class.objects.get(pk=value), value)
88 except model_class.DoesNotExist:
89 return (None, None)
90
91 def value_from_datadict(self, data, files, name):
92 # treat the empty string as None
93 result = super(AdminChooser, self).value_from_datadict(data, files, name)
94 if result == '':
95 return None
96 else:
97 return result
98
99 def __init__(self, **kwargs):
100 # allow choose_one_text / choose_another_text to be overridden per-instance
101 if 'choose_one_text' in kwargs:
102 self.choose_one_text = kwargs.pop('choose_one_text')
103 if 'choose_another_text' in kwargs:
104 self.choose_another_text = kwargs.pop('choose_another_text')
105 if 'clear_choice_text' in kwargs:
106 self.clear_choice_text = kwargs.pop('clear_choice_text')
107 if 'link_to_chosen_text' in kwargs:
108 self.link_to_chosen_text = kwargs.pop('link_to_chosen_text')
109 super(AdminChooser, self).__init__(**kwargs)
110
111
112 class AdminPageChooser(AdminChooser):
113 target_content_type = None
114 choose_one_text = _('Choose a page')
115 choose_another_text = _('Choose another page')
116 link_to_chosen_text = _('Edit this page')
117
118 def __init__(self, content_type=None, **kwargs):
119 super(AdminPageChooser, self).__init__(**kwargs)
120 self.target_content_type = content_type or ContentType.objects.get_for_model(Page)
121
122 def render_html(self, name, value, attrs):
123 model_class = self.target_content_type.model_class()
124 instance, value = self.get_instance_and_id(model_class, value)
125
126 original_field_html = super(AdminPageChooser, self).render_html(name, value, attrs)
127
128 return render_to_string("wagtailadmin/widgets/page_chooser.html", {
129 'widget': self,
130 'original_field_html': original_field_html,
131 'attrs': attrs,
132 'value': value,
133 'page': instance,
134 })
135
136 def render_js_init(self, id_, name, value):
137 model_class = self.target_content_type.model_class()
138 if isinstance(value, model_class):
139 page = value
140 else:
141 page = self.get_instance(model_class, value)
142 parent = page.get_parent() if page else None
143 content_type = self.target_content_type
144
145 return "createPageChooser({id}, {content_type}, {parent});".format(
146 id=json.dumps(id_),
147 content_type=json.dumps('{app}.{model}'.format(
148 app=content_type.app_label,
149 model=content_type.model)),
150 parent=json.dumps(parent.id if parent else None))
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailadmin/widgets.py b/wagtail/wagtailadmin/widgets.py
--- a/wagtail/wagtailadmin/widgets.py
+++ b/wagtail/wagtailadmin/widgets.py
@@ -24,7 +24,7 @@
super(AdminAutoHeightTextInput, self).__init__(default_attrs)
def render_js_init(self, id_, name, value):
- return '$("#{0}").autosize();'.format(id_)
+ return 'autosize($("#{0}"));'.format(id_)
class AdminDateInput(WidgetWithScript, widgets.DateInput):
# Set a default date format to match the one that our JS date picker expects -
| {"golden_diff": "diff --git a/wagtail/wagtailadmin/widgets.py b/wagtail/wagtailadmin/widgets.py\n--- a/wagtail/wagtailadmin/widgets.py\n+++ b/wagtail/wagtailadmin/widgets.py\n@@ -24,7 +24,7 @@\n super(AdminAutoHeightTextInput, self).__init__(default_attrs)\n \n def render_js_init(self, id_, name, value):\n- return '$(\"#{0}\").autosize();'.format(id_)\n+ return 'autosize($(\"#{0}\"));'.format(id_)\n \n class AdminDateInput(WidgetWithScript, widgets.DateInput):\n # Set a default date format to match the one that our JS date picker expects -\n", "issue": "AdminAutoHeightTextInput doesn't auto-height on page load\n`wagtailadmin.widgets.AdminAutoHeightTextInput` appears now to be the new default widget for `django.db.models.TextField`. It auto-heights when you type into it\n\n\n\n\u2026but on reloading the page, the auto-heighting appears not to occur. This is four rows of text after reloading the page:\n\n \n\nThree rows, with the exact same widget height:\n\n\n\nBut two rows with a reduced widget height:\n\n\n\n@davecranwell suggested an off-by-one error, which seems reasonable, given the slightly different heights above. \n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport json\n\nfrom django.core.urlresolvers import reverse\nfrom django.forms import widgets\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.loader import render_to_string\n\nfrom wagtail.utils.widgets import WidgetWithScript\nfrom wagtail.wagtailcore.models import Page\n\nfrom taggit.forms import TagWidget\n\n\nclass AdminAutoHeightTextInput(WidgetWithScript, widgets.Textarea):\n def __init__(self, attrs=None):\n # Use more appropriate rows default, given autoheight will alter this anyway\n default_attrs = {'rows': '1'}\n if attrs:\n default_attrs.update(attrs)\n\n super(AdminAutoHeightTextInput, self).__init__(default_attrs)\n\n def render_js_init(self, id_, name, value):\n return '$(\"#{0}\").autosize();'.format(id_)\n\nclass AdminDateInput(WidgetWithScript, widgets.DateInput):\n # Set a default date format to match the one that our JS date picker expects -\n # it can still be overridden explicitly, but this way it won't be affected by\n # the DATE_INPUT_FORMATS setting\n def __init__(self, attrs=None, format='%Y-%m-%d'):\n super(AdminDateInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initDateChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminTimeInput(WidgetWithScript, widgets.TimeInput):\n def __init__(self, attrs=None, format='%H:%M'):\n super(AdminTimeInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initTimeChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminDateTimeInput(WidgetWithScript, widgets.DateTimeInput):\n def __init__(self, attrs=None, format='%Y-%m-%d %H:%M'):\n super(AdminDateTimeInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initDateTimeChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminTagWidget(WidgetWithScript, TagWidget):\n def render_js_init(self, id_, name, value):\n return \"initTagField({0}, {1});\".format(\n json.dumps(id_),\n json.dumps(reverse('wagtailadmin_tag_autocomplete')))\n\n\nclass AdminChooser(WidgetWithScript, widgets.Input):\n input_type = 'hidden'\n choose_one_text = _(\"Choose an item\")\n choose_another_text = _(\"Choose another item\")\n 
clear_choice_text = _(\"Clear choice\")\n link_to_chosen_text = _(\"Edit this item\")\n\n def get_instance(self, model_class, value):\n # helper method for cleanly turning 'value' into an instance object\n if value is None:\n return None\n\n try:\n return model_class.objects.get(pk=value)\n except model_class.DoesNotExist:\n return None\n\n def get_instance_and_id(self, model_class, value):\n if value is None:\n return (None, None)\n elif isinstance(value, model_class):\n return (value, value.pk)\n else:\n try:\n return (model_class.objects.get(pk=value), value)\n except model_class.DoesNotExist:\n return (None, None)\n\n def value_from_datadict(self, data, files, name):\n # treat the empty string as None\n result = super(AdminChooser, self).value_from_datadict(data, files, name)\n if result == '':\n return None\n else:\n return result\n\n def __init__(self, **kwargs):\n # allow choose_one_text / choose_another_text to be overridden per-instance\n if 'choose_one_text' in kwargs:\n self.choose_one_text = kwargs.pop('choose_one_text')\n if 'choose_another_text' in kwargs:\n self.choose_another_text = kwargs.pop('choose_another_text')\n if 'clear_choice_text' in kwargs:\n self.clear_choice_text = kwargs.pop('clear_choice_text')\n if 'link_to_chosen_text' in kwargs:\n self.link_to_chosen_text = kwargs.pop('link_to_chosen_text')\n super(AdminChooser, self).__init__(**kwargs)\n\n\nclass AdminPageChooser(AdminChooser):\n target_content_type = None\n choose_one_text = _('Choose a page')\n choose_another_text = _('Choose another page')\n link_to_chosen_text = _('Edit this page')\n\n def __init__(self, content_type=None, **kwargs):\n super(AdminPageChooser, self).__init__(**kwargs)\n self.target_content_type = content_type or ContentType.objects.get_for_model(Page)\n\n def render_html(self, name, value, attrs):\n model_class = self.target_content_type.model_class()\n instance, value = self.get_instance_and_id(model_class, value)\n\n original_field_html = super(AdminPageChooser, self).render_html(name, value, attrs)\n\n return render_to_string(\"wagtailadmin/widgets/page_chooser.html\", {\n 'widget': self,\n 'original_field_html': original_field_html,\n 'attrs': attrs,\n 'value': value,\n 'page': instance,\n })\n\n def render_js_init(self, id_, name, value):\n model_class = self.target_content_type.model_class()\n if isinstance(value, model_class):\n page = value\n else:\n page = self.get_instance(model_class, value)\n parent = page.get_parent() if page else None\n content_type = self.target_content_type\n\n return \"createPageChooser({id}, {content_type}, {parent});\".format(\n id=json.dumps(id_),\n content_type=json.dumps('{app}.{model}'.format(\n app=content_type.app_label,\n model=content_type.model)),\n parent=json.dumps(parent.id if parent else None))\n", "path": "wagtail/wagtailadmin/widgets.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport json\n\nfrom django.core.urlresolvers import reverse\nfrom django.forms import widgets\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.loader import render_to_string\n\nfrom wagtail.utils.widgets import WidgetWithScript\nfrom wagtail.wagtailcore.models import Page\n\nfrom taggit.forms import TagWidget\n\n\nclass AdminAutoHeightTextInput(WidgetWithScript, widgets.Textarea):\n def __init__(self, attrs=None):\n # Use more appropriate rows default, given autoheight will alter this anyway\n default_attrs = {'rows': 
'1'}\n if attrs:\n default_attrs.update(attrs)\n\n super(AdminAutoHeightTextInput, self).__init__(default_attrs)\n\n def render_js_init(self, id_, name, value):\n return 'autosize($(\"#{0}\"));'.format(id_)\n\nclass AdminDateInput(WidgetWithScript, widgets.DateInput):\n # Set a default date format to match the one that our JS date picker expects -\n # it can still be overridden explicitly, but this way it won't be affected by\n # the DATE_INPUT_FORMATS setting\n def __init__(self, attrs=None, format='%Y-%m-%d'):\n super(AdminDateInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initDateChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminTimeInput(WidgetWithScript, widgets.TimeInput):\n def __init__(self, attrs=None, format='%H:%M'):\n super(AdminTimeInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initTimeChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminDateTimeInput(WidgetWithScript, widgets.DateTimeInput):\n def __init__(self, attrs=None, format='%Y-%m-%d %H:%M'):\n super(AdminDateTimeInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initDateTimeChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminTagWidget(WidgetWithScript, TagWidget):\n def render_js_init(self, id_, name, value):\n return \"initTagField({0}, {1});\".format(\n json.dumps(id_),\n json.dumps(reverse('wagtailadmin_tag_autocomplete')))\n\n\nclass AdminChooser(WidgetWithScript, widgets.Input):\n input_type = 'hidden'\n choose_one_text = _(\"Choose an item\")\n choose_another_text = _(\"Choose another item\")\n clear_choice_text = _(\"Clear choice\")\n link_to_chosen_text = _(\"Edit this item\")\n\n def get_instance(self, model_class, value):\n # helper method for cleanly turning 'value' into an instance object\n if value is None:\n return None\n\n try:\n return model_class.objects.get(pk=value)\n except model_class.DoesNotExist:\n return None\n\n def get_instance_and_id(self, model_class, value):\n if value is None:\n return (None, None)\n elif isinstance(value, model_class):\n return (value, value.pk)\n else:\n try:\n return (model_class.objects.get(pk=value), value)\n except model_class.DoesNotExist:\n return (None, None)\n\n def value_from_datadict(self, data, files, name):\n # treat the empty string as None\n result = super(AdminChooser, self).value_from_datadict(data, files, name)\n if result == '':\n return None\n else:\n return result\n\n def __init__(self, **kwargs):\n # allow choose_one_text / choose_another_text to be overridden per-instance\n if 'choose_one_text' in kwargs:\n self.choose_one_text = kwargs.pop('choose_one_text')\n if 'choose_another_text' in kwargs:\n self.choose_another_text = kwargs.pop('choose_another_text')\n if 'clear_choice_text' in kwargs:\n self.clear_choice_text = kwargs.pop('clear_choice_text')\n if 'link_to_chosen_text' in kwargs:\n self.link_to_chosen_text = kwargs.pop('link_to_chosen_text')\n super(AdminChooser, self).__init__(**kwargs)\n\n\nclass AdminPageChooser(AdminChooser):\n target_content_type = None\n choose_one_text = _('Choose a page')\n choose_another_text = _('Choose another page')\n link_to_chosen_text = _('Edit this page')\n\n def __init__(self, content_type=None, **kwargs):\n super(AdminPageChooser, self).__init__(**kwargs)\n self.target_content_type = content_type or ContentType.objects.get_for_model(Page)\n\n def render_html(self, name, value, attrs):\n model_class = 
self.target_content_type.model_class()\n instance, value = self.get_instance_and_id(model_class, value)\n\n original_field_html = super(AdminPageChooser, self).render_html(name, value, attrs)\n\n return render_to_string(\"wagtailadmin/widgets/page_chooser.html\", {\n 'widget': self,\n 'original_field_html': original_field_html,\n 'attrs': attrs,\n 'value': value,\n 'page': instance,\n })\n\n def render_js_init(self, id_, name, value):\n model_class = self.target_content_type.model_class()\n if isinstance(value, model_class):\n page = value\n else:\n page = self.get_instance(model_class, value)\n parent = page.get_parent() if page else None\n content_type = self.target_content_type\n\n return \"createPageChooser({id}, {content_type}, {parent});\".format(\n id=json.dumps(id_),\n content_type=json.dumps('{app}.{model}'.format(\n app=content_type.app_label,\n model=content_type.model)),\n parent=json.dumps(parent.id if parent else None))\n", "path": "wagtail/wagtailadmin/widgets.py"}]} | 2,343 | 151 |
gh_patches_debug_31012 | rasdani/github-patches | git_diff | akvo__akvo-rsr-4566 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
enumerators should be able to see "pending/unapproved" indicator updates from other enumerators.
When multiple enumerators are collaborating on a single indicator, they need to be able to see the updates from other enumerators.
--- END ISSUE ---
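For orientation before the file segment below: the visibility rules live in the `IndicatorPeriodData.get_user_viewable_updates` classmethod, and the request amounts to letting an enumerator's queryset include other users' pending (non-draft) updates on the same project, not only their own. A minimal sketch of that idea, reusing names from the model below — an assumption about the intended behaviour, not the repository's actual fix:

```python
# Hypothetical sketch: widen visibility to other enumerators' pending updates.
def viewable_updates(queryset, user, cls):
    approved = queryset.filter(status=cls.STATUS_APPROVED_CODE)       # everyone sees approved
    own = queryset.filter(user=user)                                   # author still sees own drafts
    others_pending = queryset.exclude(status=cls.STATUS_DRAFT_CODE)    # collaborators see non-drafts
    return (approved | own | others_pending).distinct()
```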
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/models/result/indicator_period_data.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from decimal import Decimal, InvalidOperation
8
9 from django.conf import settings
10 from django.contrib.postgres.fields import ArrayField
11 from django.core.exceptions import ValidationError
12 from django.db import models
13 from django.db.models.signals import post_save
14 from django.dispatch import receiver
15 from django.utils.translation import ugettext_lazy as _
16 from sorl.thumbnail.fields import ImageField
17
18 from .utils import (calculate_percentage, file_path, image_path,
19 MultipleUpdateError, PERCENTAGE_MEASURE, QUALITATIVE,
20 QUANTITATIVE)
21 from akvo.rsr.fields import ValidXMLCharField, ValidXMLTextField
22 from akvo.rsr.mixins import TimestampsMixin, IndicatorUpdateMixin
23 from akvo.utils import rsr_image_path
24
25
26 class IndicatorPeriodData(TimestampsMixin, IndicatorUpdateMixin, models.Model):
27 """
28 Model for adding data to an indicator period.
29 """
30
31 project_relation = 'results__indicators__periods__data__in'
32
33 STATUS_DRAFT = str(_('draft'))
34 STATUS_PENDING = str(_('pending approval'))
35 STATUS_REVISION = str(_('return for revision'))
36 STATUS_APPROVED = str(_('approved'))
37
38 STATUS_DRAFT_CODE = 'D'
39 STATUS_PENDING_CODE = 'P'
40 STATUS_REVISION_CODE = 'R'
41 STATUS_APPROVED_CODE = 'A'
42
43 STATUS_CODES_LIST = [STATUS_DRAFT_CODE, STATUS_PENDING_CODE,
44 STATUS_REVISION_CODE, STATUS_APPROVED_CODE]
45 STATUSES_LABELS_LIST = [STATUS_DRAFT, STATUS_PENDING, STATUS_REVISION,
46 STATUS_APPROVED]
47 STATUSES = list(zip(STATUS_CODES_LIST, STATUSES_LABELS_LIST))
48
49 UPDATE_METHODS = (
50 ('W', _('web')),
51 ('M', _('mobile')),
52 )
53
54 period = models.ForeignKey('IndicatorPeriod', verbose_name=_('indicator period'),
55 related_name='data', on_delete=models.PROTECT)
56 # TODO: rename to created_by when old results framework page is no longer in use
57 user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), db_index=True,
58 related_name='created_period_updates')
59 approved_by = models.ForeignKey(
60 settings.AUTH_USER_MODEL, verbose_name=_('approved by'), db_index=True,
61 related_name='approved_period_updates', blank=True, null=True,
62 )
63 narrative = ValidXMLTextField(_('qualitative indicator narrative'), blank=True)
64 score_index = models.SmallIntegerField(_('score index'), null=True, blank=True)
65 score_indices = ArrayField(models.SmallIntegerField(), default=[])
66 period_actual_value = ValidXMLCharField(_('period actual value'), max_length=50, default='')
67 status = ValidXMLCharField(_('status'), max_length=1, choices=STATUSES, db_index=True,
68 default=STATUS_DRAFT_CODE)
69 text = ValidXMLTextField(_('text'), blank=True)
70 review_note = ValidXMLTextField(_('text'), blank=True)
71 photo = ImageField(_('photo'), blank=True, upload_to=image_path, max_length=255)
72 file = models.FileField(_('file'), blank=True, upload_to=file_path, max_length=255)
73 update_method = ValidXMLCharField(_('update method'), blank=True, max_length=1,
74 choices=UPDATE_METHODS, db_index=True, default='W')
75
76 class Meta:
77 app_label = 'rsr'
78 verbose_name = _('indicator period data')
79 verbose_name_plural = _('indicator period data')
80 ordering = ('-id', )
81
82 def save(self, recalculate=True, *args, **kwargs):
83 # Allow only a single update for percentage measure indicators
84 if not self.period.can_save_update(self.id):
85 raise MultipleUpdateError('Cannot create multiple updates with percentages')
86
87 if (
88 self.period.indicator.measure == PERCENTAGE_MEASURE
89 and self.numerator is not None
90 and self.denominator not in {0, '0', None}
91 ):
92 self.value = calculate_percentage(self.numerator, self.denominator)
93
94 super(IndicatorPeriodData, self).save(*args, **kwargs)
95
96 # In case the status is approved, recalculate the period
97 if recalculate and self.status == self.STATUS_APPROVED_CODE:
98 # FIXME: Should we call this even when status is not approved?
99 self.period.recalculate_period()
100 self.period.update_actual_comment()
101 # Update score even when the update is not approved, yet. It handles the
102 # case where an approved update is returned for revision, etc.
103 self.period.update_score()
104
105 def delete(self, *args, **kwargs):
106 old_status = self.status
107
108 super(IndicatorPeriodData, self).delete(*args, **kwargs)
109
110 # In case the status was approved, recalculate the period
111 if old_status == self.STATUS_APPROVED_CODE:
112 self.period.recalculate_period()
113 self.period.update_actual_comment()
114 self.period.update_score()
115
116 def clean(self):
117 """
118 Perform several checks before we can actually save the update data.
119 """
120 validation_errors = {}
121
122 project = self.period.indicator.result.project
123
124 # Don't allow a data update to an unpublished project
125 if not project.is_published():
126 validation_errors['period'] = str(_('Indicator period must be part of a published '
127 'project to add data to it'))
128 raise ValidationError(validation_errors)
129
130 # Don't allow a data update to a non-Impact project
131 if not project.is_impact_project:
132 validation_errors['period'] = str(_('Indicator period must be part of an RSR '
133 'Impact project to add data to it'))
134 raise ValidationError(validation_errors)
135
136 # Don't allow a data update to a locked period
137 if self.period.locked:
138 validation_errors['period'] = str(_('Indicator period must be unlocked to add '
139 'data to it'))
140 raise ValidationError(validation_errors)
141
142 # Don't allow a data update to an aggregated parent period with 'percentage' as measurement
143 if self.period.indicator.children_aggregate_percentage:
144 validation_errors['period'] = str(
145 _('Indicator period has an average aggregate of the child projects. Disable '
146 'aggregations to add data to it'))
147 raise ValidationError(validation_errors)
148
149 if self.pk:
150 orig = IndicatorPeriodData.objects.get(pk=self.pk)
151
152 # Don't allow for the indicator period to change
153 if orig.period != self.period:
154 validation_errors['period'] = str(_('Not allowed to change indicator period '
155 'in a data update'))
156
157 if self.period.indicator.type == QUANTITATIVE:
158 if self.narrative is not None:
159 validation_errors['period'] = str(
160 _('Narrative field should be empty in quantitative indicators'))
161 if self.value is not None:
162 try:
163 self.value = Decimal(self.value)
164 except Exception:
165 validation_errors['period'] = str(
166 _('Only numeric values are allowed in quantitative indicators'))
167
168 if self.period.indicator.type == QUALITATIVE:
169 if self.value is not None:
170 validation_errors['period'] = str(
171 _('Value field should be empty in qualitative indicators'))
172
173 if validation_errors:
174 raise ValidationError(validation_errors)
175
176 @property
177 def status_display(self):
178 """
179 Returns the display of the status.
180 """
181 try:
182 return dict(self.STATUSES)[self.status].capitalize()
183 except KeyError:
184 return ''
185
186 @property
187 def photo_url(self):
188 """
189 Returns the full URL of the photo.
190 """
191 return self.photo.url if self.photo else ''
192
193 @property
194 def file_url(self):
195 """
196 Returns the full URL of the file.
197 """
198 return self.file.url if self.file else ''
199
200 def update_new_value(self):
201 """Returns a string with the new value."""
202 try:
203 add_up = Decimal(self.value) + Decimal(self.period_actual_value)
204 relative = '+' + str(self.value) if self.value >= 0 else str(self.value)
205 return "{} ({})".format(str(add_up), relative)
206 except (InvalidOperation, TypeError):
207 return self.value
208
209 @classmethod
210 def get_user_viewable_updates(cls, queryset, user):
211 approved_updates = queryset.filter(status=cls.STATUS_APPROVED_CODE)
212
213 if user.is_anonymous():
214 f_queryset = approved_updates
215
216 elif user.is_admin or user.is_superuser:
217 f_queryset = queryset
218
219 else:
220 own_updates = queryset.filter(user=user)
221 non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)
222 filter_ = user.get_permission_filter(
223 'rsr.view_indicatorperioddata',
224 'period__indicator__result__project__'
225 )
226
227 from akvo.rsr.models import Project
228 projects = Project.objects\
229 .filter(results__indicators__periods__data__in=queryset)\
230 .distinct()
231
232 project = projects.first()
233 if projects.count() == 1 and user.has_perm('rsr.view_project', project) and project.in_nuffic_hierarchy():
234 others_updates = non_draft_updates
235 else:
236 others_updates = non_draft_updates.filter(filter_)
237
238 f_queryset = (
239 approved_updates
240 | own_updates
241 | others_updates
242 )
243
244 return f_queryset.distinct()
245
246
247 def update_image_path(instance, file_name):
248 path = 'db/indicator_period_data/%d/data_photo/%%(instance_pk)s/%%(file_name)s' % instance.update.pk
249 return rsr_image_path(instance, file_name, path)
250
251
252 class IndicatorPeriodDataPhoto(models.Model):
253 update = models.ForeignKey('IndicatorPeriodData', on_delete=models.CASCADE)
254 photo = ImageField(_('photo'), upload_to=update_image_path, max_length=255)
255
256 class Meta:
257 app_label = 'rsr'
258
259
260 def update_file_path(instance, file_name):
261 path = 'db/indicator_period_data/%d/data_file/%%(instance_pk)s/%%(file_name)s' % instance.update.pk
262 return rsr_image_path(instance, file_name, path)
263
264
265 class IndicatorPeriodDataFile(models.Model):
266 update = models.ForeignKey('IndicatorPeriodData', on_delete=models.CASCADE)
267 file = models.FileField(_('file'), upload_to=update_file_path, max_length=255)
268
269 class Meta:
270 app_label = 'rsr'
271
272
273 @receiver(post_save, sender=IndicatorPeriodData)
274 def set_qualitative_narrative(sender, **kwargs):
275 """Update the narrative field of a qualitative indicator on updates."""
276
277 # Disable signal handler when loading fixtures
278 if kwargs.get('raw', False):
279 return
280
281 update = kwargs['instance']
282 if update.status != IndicatorPeriodData.STATUS_APPROVED_CODE:
283 return
284
285 if update.period.indicator.type != QUALITATIVE:
286 return
287
288 # Current update is the latest update?
289 if update.period.approved_updates.last().id != update.id:
290 return
291
292 update.period.narrative = update.narrative
293 update.period.save()
294
295
296 @receiver(post_save, sender=IndicatorPeriodData)
297 def _send_return_for_revision_email(sender, **kwargs):
298 """Send email to assigned enumerator when indicator is returned for revision."""
299
300 # Disable signal handler when loading fixtures
301 if kwargs.get('raw', False):
302 return
303
304 update = kwargs['instance']
305 if update.status != IndicatorPeriodData.STATUS_REVISION_CODE:
306 return
307
308 from akvo.rest.views.project_enumerators import send_return_for_revision_email
309 send_return_for_revision_email(update)
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/models/result/indicator_period_data.py b/akvo/rsr/models/result/indicator_period_data.py
--- a/akvo/rsr/models/result/indicator_period_data.py
+++ b/akvo/rsr/models/result/indicator_period_data.py
@@ -217,29 +217,29 @@
f_queryset = queryset
else:
- own_updates = queryset.filter(user=user)
- non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)
- filter_ = user.get_permission_filter(
- 'rsr.view_indicatorperioddata',
- 'period__indicator__result__project__'
- )
-
from akvo.rsr.models import Project
projects = Project.objects\
.filter(results__indicators__periods__data__in=queryset)\
.distinct()
+ project = projects.first() if projects.count() == 1 else None
+
+ # Allow Nuffic users to see all updates, irrespective of what state they are in
+ if project is not None and project.in_nuffic_hierarchy() and user.has_perm('rsr.view_project', project):
+ f_queryset = queryset
- project = projects.first()
- if projects.count() == 1 and user.has_perm('rsr.view_project', project) and project.in_nuffic_hierarchy():
- others_updates = non_draft_updates
else:
+ own_updates = queryset.filter(user=user)
+ non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)
+ filter_ = user.get_permission_filter(
+ 'rsr.view_indicatorperioddata',
+ 'period__indicator__result__project__'
+ )
others_updates = non_draft_updates.filter(filter_)
-
- f_queryset = (
- approved_updates
- | own_updates
- | others_updates
- )
+ f_queryset = (
+ approved_updates
+ | own_updates
+ | others_updates
+ )
return f_queryset.distinct()
| {"golden_diff": "diff --git a/akvo/rsr/models/result/indicator_period_data.py b/akvo/rsr/models/result/indicator_period_data.py\n--- a/akvo/rsr/models/result/indicator_period_data.py\n+++ b/akvo/rsr/models/result/indicator_period_data.py\n@@ -217,29 +217,29 @@\n f_queryset = queryset\n \n else:\n- own_updates = queryset.filter(user=user)\n- non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)\n- filter_ = user.get_permission_filter(\n- 'rsr.view_indicatorperioddata',\n- 'period__indicator__result__project__'\n- )\n-\n from akvo.rsr.models import Project\n projects = Project.objects\\\n .filter(results__indicators__periods__data__in=queryset)\\\n .distinct()\n+ project = projects.first() if projects.count() == 1 else None\n+\n+ # Allow Nuffic users to see all updates, irrespective of what state they are in\n+ if project is not None and project.in_nuffic_hierarchy() and user.has_perm('rsr.view_project', project):\n+ f_queryset = queryset\n \n- project = projects.first()\n- if projects.count() == 1 and user.has_perm('rsr.view_project', project) and project.in_nuffic_hierarchy():\n- others_updates = non_draft_updates\n else:\n+ own_updates = queryset.filter(user=user)\n+ non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)\n+ filter_ = user.get_permission_filter(\n+ 'rsr.view_indicatorperioddata',\n+ 'period__indicator__result__project__'\n+ )\n others_updates = non_draft_updates.filter(filter_)\n-\n- f_queryset = (\n- approved_updates\n- | own_updates\n- | others_updates\n- )\n+ f_queryset = (\n+ approved_updates\n+ | own_updates\n+ | others_updates\n+ )\n \n return f_queryset.distinct()\n", "issue": "enumerators should be able to see \"pending/unapproved\" indicator updates from other enumerators.\nWhen multiple enumerators are collaborating on a single indicator, they need to be able to see the updates from other enumerators. 
\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom decimal import Decimal, InvalidOperation\n\nfrom django.conf import settings\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom sorl.thumbnail.fields import ImageField\n\nfrom .utils import (calculate_percentage, file_path, image_path,\n MultipleUpdateError, PERCENTAGE_MEASURE, QUALITATIVE,\n QUANTITATIVE)\nfrom akvo.rsr.fields import ValidXMLCharField, ValidXMLTextField\nfrom akvo.rsr.mixins import TimestampsMixin, IndicatorUpdateMixin\nfrom akvo.utils import rsr_image_path\n\n\nclass IndicatorPeriodData(TimestampsMixin, IndicatorUpdateMixin, models.Model):\n \"\"\"\n Model for adding data to an indicator period.\n \"\"\"\n\n project_relation = 'results__indicators__periods__data__in'\n\n STATUS_DRAFT = str(_('draft'))\n STATUS_PENDING = str(_('pending approval'))\n STATUS_REVISION = str(_('return for revision'))\n STATUS_APPROVED = str(_('approved'))\n\n STATUS_DRAFT_CODE = 'D'\n STATUS_PENDING_CODE = 'P'\n STATUS_REVISION_CODE = 'R'\n STATUS_APPROVED_CODE = 'A'\n\n STATUS_CODES_LIST = [STATUS_DRAFT_CODE, STATUS_PENDING_CODE,\n STATUS_REVISION_CODE, STATUS_APPROVED_CODE]\n STATUSES_LABELS_LIST = [STATUS_DRAFT, STATUS_PENDING, STATUS_REVISION,\n STATUS_APPROVED]\n STATUSES = list(zip(STATUS_CODES_LIST, STATUSES_LABELS_LIST))\n\n UPDATE_METHODS = (\n ('W', _('web')),\n ('M', _('mobile')),\n )\n\n period = models.ForeignKey('IndicatorPeriod', verbose_name=_('indicator period'),\n related_name='data', on_delete=models.PROTECT)\n # TODO: rename to created_by when old results framework page is no longer in use\n user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), db_index=True,\n related_name='created_period_updates')\n approved_by = models.ForeignKey(\n settings.AUTH_USER_MODEL, verbose_name=_('approved by'), db_index=True,\n related_name='approved_period_updates', blank=True, null=True,\n )\n narrative = ValidXMLTextField(_('qualitative indicator narrative'), blank=True)\n score_index = models.SmallIntegerField(_('score index'), null=True, blank=True)\n score_indices = ArrayField(models.SmallIntegerField(), default=[])\n period_actual_value = ValidXMLCharField(_('period actual value'), max_length=50, default='')\n status = ValidXMLCharField(_('status'), max_length=1, choices=STATUSES, db_index=True,\n default=STATUS_DRAFT_CODE)\n text = ValidXMLTextField(_('text'), blank=True)\n review_note = ValidXMLTextField(_('text'), blank=True)\n photo = ImageField(_('photo'), blank=True, upload_to=image_path, max_length=255)\n file = models.FileField(_('file'), blank=True, upload_to=file_path, max_length=255)\n update_method = ValidXMLCharField(_('update method'), blank=True, max_length=1,\n choices=UPDATE_METHODS, db_index=True, default='W')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _('indicator period data')\n verbose_name_plural = _('indicator period data')\n ordering = ('-id', )\n\n def save(self, recalculate=True, *args, **kwargs):\n # Allow only a single update for percentage measure indicators\n if not 
self.period.can_save_update(self.id):\n raise MultipleUpdateError('Cannot create multiple updates with percentages')\n\n if (\n self.period.indicator.measure == PERCENTAGE_MEASURE\n and self.numerator is not None\n and self.denominator not in {0, '0', None}\n ):\n self.value = calculate_percentage(self.numerator, self.denominator)\n\n super(IndicatorPeriodData, self).save(*args, **kwargs)\n\n # In case the status is approved, recalculate the period\n if recalculate and self.status == self.STATUS_APPROVED_CODE:\n # FIXME: Should we call this even when status is not approved?\n self.period.recalculate_period()\n self.period.update_actual_comment()\n # Update score even when the update is not approved, yet. It handles the\n # case where an approved update is returned for revision, etc.\n self.period.update_score()\n\n def delete(self, *args, **kwargs):\n old_status = self.status\n\n super(IndicatorPeriodData, self).delete(*args, **kwargs)\n\n # In case the status was approved, recalculate the period\n if old_status == self.STATUS_APPROVED_CODE:\n self.period.recalculate_period()\n self.period.update_actual_comment()\n self.period.update_score()\n\n def clean(self):\n \"\"\"\n Perform several checks before we can actually save the update data.\n \"\"\"\n validation_errors = {}\n\n project = self.period.indicator.result.project\n\n # Don't allow a data update to an unpublished project\n if not project.is_published():\n validation_errors['period'] = str(_('Indicator period must be part of a published '\n 'project to add data to it'))\n raise ValidationError(validation_errors)\n\n # Don't allow a data update to a non-Impact project\n if not project.is_impact_project:\n validation_errors['period'] = str(_('Indicator period must be part of an RSR '\n 'Impact project to add data to it'))\n raise ValidationError(validation_errors)\n\n # Don't allow a data update to a locked period\n if self.period.locked:\n validation_errors['period'] = str(_('Indicator period must be unlocked to add '\n 'data to it'))\n raise ValidationError(validation_errors)\n\n # Don't allow a data update to an aggregated parent period with 'percentage' as measurement\n if self.period.indicator.children_aggregate_percentage:\n validation_errors['period'] = str(\n _('Indicator period has an average aggregate of the child projects. 
Disable '\n 'aggregations to add data to it'))\n raise ValidationError(validation_errors)\n\n if self.pk:\n orig = IndicatorPeriodData.objects.get(pk=self.pk)\n\n # Don't allow for the indicator period to change\n if orig.period != self.period:\n validation_errors['period'] = str(_('Not allowed to change indicator period '\n 'in a data update'))\n\n if self.period.indicator.type == QUANTITATIVE:\n if self.narrative is not None:\n validation_errors['period'] = str(\n _('Narrative field should be empty in quantitative indicators'))\n if self.value is not None:\n try:\n self.value = Decimal(self.value)\n except Exception:\n validation_errors['period'] = str(\n _('Only numeric values are allowed in quantitative indicators'))\n\n if self.period.indicator.type == QUALITATIVE:\n if self.value is not None:\n validation_errors['period'] = str(\n _('Value field should be empty in qualitative indicators'))\n\n if validation_errors:\n raise ValidationError(validation_errors)\n\n @property\n def status_display(self):\n \"\"\"\n Returns the display of the status.\n \"\"\"\n try:\n return dict(self.STATUSES)[self.status].capitalize()\n except KeyError:\n return ''\n\n @property\n def photo_url(self):\n \"\"\"\n Returns the full URL of the photo.\n \"\"\"\n return self.photo.url if self.photo else ''\n\n @property\n def file_url(self):\n \"\"\"\n Returns the full URL of the file.\n \"\"\"\n return self.file.url if self.file else ''\n\n def update_new_value(self):\n \"\"\"Returns a string with the new value.\"\"\"\n try:\n add_up = Decimal(self.value) + Decimal(self.period_actual_value)\n relative = '+' + str(self.value) if self.value >= 0 else str(self.value)\n return \"{} ({})\".format(str(add_up), relative)\n except (InvalidOperation, TypeError):\n return self.value\n\n @classmethod\n def get_user_viewable_updates(cls, queryset, user):\n approved_updates = queryset.filter(status=cls.STATUS_APPROVED_CODE)\n\n if user.is_anonymous():\n f_queryset = approved_updates\n\n elif user.is_admin or user.is_superuser:\n f_queryset = queryset\n\n else:\n own_updates = queryset.filter(user=user)\n non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)\n filter_ = user.get_permission_filter(\n 'rsr.view_indicatorperioddata',\n 'period__indicator__result__project__'\n )\n\n from akvo.rsr.models import Project\n projects = Project.objects\\\n .filter(results__indicators__periods__data__in=queryset)\\\n .distinct()\n\n project = projects.first()\n if projects.count() == 1 and user.has_perm('rsr.view_project', project) and project.in_nuffic_hierarchy():\n others_updates = non_draft_updates\n else:\n others_updates = non_draft_updates.filter(filter_)\n\n f_queryset = (\n approved_updates\n | own_updates\n | others_updates\n )\n\n return f_queryset.distinct()\n\n\ndef update_image_path(instance, file_name):\n path = 'db/indicator_period_data/%d/data_photo/%%(instance_pk)s/%%(file_name)s' % instance.update.pk\n return rsr_image_path(instance, file_name, path)\n\n\nclass IndicatorPeriodDataPhoto(models.Model):\n update = models.ForeignKey('IndicatorPeriodData', on_delete=models.CASCADE)\n photo = ImageField(_('photo'), upload_to=update_image_path, max_length=255)\n\n class Meta:\n app_label = 'rsr'\n\n\ndef update_file_path(instance, file_name):\n path = 'db/indicator_period_data/%d/data_file/%%(instance_pk)s/%%(file_name)s' % instance.update.pk\n return rsr_image_path(instance, file_name, path)\n\n\nclass IndicatorPeriodDataFile(models.Model):\n update = models.ForeignKey('IndicatorPeriodData', 
on_delete=models.CASCADE)\n file = models.FileField(_('file'), upload_to=update_file_path, max_length=255)\n\n class Meta:\n app_label = 'rsr'\n\n\n@receiver(post_save, sender=IndicatorPeriodData)\ndef set_qualitative_narrative(sender, **kwargs):\n \"\"\"Update the narrative field of a qualitative indicator on updates.\"\"\"\n\n # Disable signal handler when loading fixtures\n if kwargs.get('raw', False):\n return\n\n update = kwargs['instance']\n if update.status != IndicatorPeriodData.STATUS_APPROVED_CODE:\n return\n\n if update.period.indicator.type != QUALITATIVE:\n return\n\n # Current update is the latest update?\n if update.period.approved_updates.last().id != update.id:\n return\n\n update.period.narrative = update.narrative\n update.period.save()\n\n\n@receiver(post_save, sender=IndicatorPeriodData)\ndef _send_return_for_revision_email(sender, **kwargs):\n \"\"\"Send email to assigned enumerator when indicator is returned for revision.\"\"\"\n\n # Disable signal handler when loading fixtures\n if kwargs.get('raw', False):\n return\n\n update = kwargs['instance']\n if update.status != IndicatorPeriodData.STATUS_REVISION_CODE:\n return\n\n from akvo.rest.views.project_enumerators import send_return_for_revision_email\n send_return_for_revision_email(update)\n", "path": "akvo/rsr/models/result/indicator_period_data.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom decimal import Decimal, InvalidOperation\n\nfrom django.conf import settings\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom sorl.thumbnail.fields import ImageField\n\nfrom .utils import (calculate_percentage, file_path, image_path,\n MultipleUpdateError, PERCENTAGE_MEASURE, QUALITATIVE,\n QUANTITATIVE)\nfrom akvo.rsr.fields import ValidXMLCharField, ValidXMLTextField\nfrom akvo.rsr.mixins import TimestampsMixin, IndicatorUpdateMixin\nfrom akvo.utils import rsr_image_path\n\n\nclass IndicatorPeriodData(TimestampsMixin, IndicatorUpdateMixin, models.Model):\n \"\"\"\n Model for adding data to an indicator period.\n \"\"\"\n\n project_relation = 'results__indicators__periods__data__in'\n\n STATUS_DRAFT = str(_('draft'))\n STATUS_PENDING = str(_('pending approval'))\n STATUS_REVISION = str(_('return for revision'))\n STATUS_APPROVED = str(_('approved'))\n\n STATUS_DRAFT_CODE = 'D'\n STATUS_PENDING_CODE = 'P'\n STATUS_REVISION_CODE = 'R'\n STATUS_APPROVED_CODE = 'A'\n\n STATUS_CODES_LIST = [STATUS_DRAFT_CODE, STATUS_PENDING_CODE,\n STATUS_REVISION_CODE, STATUS_APPROVED_CODE]\n STATUSES_LABELS_LIST = [STATUS_DRAFT, STATUS_PENDING, STATUS_REVISION,\n STATUS_APPROVED]\n STATUSES = list(zip(STATUS_CODES_LIST, STATUSES_LABELS_LIST))\n\n UPDATE_METHODS = (\n ('W', _('web')),\n ('M', _('mobile')),\n )\n\n period = models.ForeignKey('IndicatorPeriod', verbose_name=_('indicator period'),\n related_name='data', on_delete=models.PROTECT)\n # TODO: rename to created_by when old results framework page is no longer in use\n user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), db_index=True,\n 
related_name='created_period_updates')\n approved_by = models.ForeignKey(\n settings.AUTH_USER_MODEL, verbose_name=_('approved by'), db_index=True,\n related_name='approved_period_updates', blank=True, null=True,\n )\n narrative = ValidXMLTextField(_('qualitative indicator narrative'), blank=True)\n score_index = models.SmallIntegerField(_('score index'), null=True, blank=True)\n score_indices = ArrayField(models.SmallIntegerField(), default=[])\n period_actual_value = ValidXMLCharField(_('period actual value'), max_length=50, default='')\n status = ValidXMLCharField(_('status'), max_length=1, choices=STATUSES, db_index=True,\n default=STATUS_DRAFT_CODE)\n text = ValidXMLTextField(_('text'), blank=True)\n review_note = ValidXMLTextField(_('text'), blank=True)\n photo = ImageField(_('photo'), blank=True, upload_to=image_path, max_length=255)\n file = models.FileField(_('file'), blank=True, upload_to=file_path, max_length=255)\n update_method = ValidXMLCharField(_('update method'), blank=True, max_length=1,\n choices=UPDATE_METHODS, db_index=True, default='W')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _('indicator period data')\n verbose_name_plural = _('indicator period data')\n ordering = ('-id', )\n\n def save(self, recalculate=True, *args, **kwargs):\n # Allow only a single update for percentage measure indicators\n if not self.period.can_save_update(self.id):\n raise MultipleUpdateError('Cannot create multiple updates with percentages')\n\n if (\n self.period.indicator.measure == PERCENTAGE_MEASURE\n and self.numerator is not None\n and self.denominator not in {0, '0', None}\n ):\n self.value = calculate_percentage(self.numerator, self.denominator)\n\n super(IndicatorPeriodData, self).save(*args, **kwargs)\n\n # In case the status is approved, recalculate the period\n if recalculate and self.status == self.STATUS_APPROVED_CODE:\n # FIXME: Should we call this even when status is not approved?\n self.period.recalculate_period()\n self.period.update_actual_comment()\n # Update score even when the update is not approved, yet. 
It handles the\n # case where an approved update is returned for revision, etc.\n self.period.update_score()\n\n def delete(self, *args, **kwargs):\n old_status = self.status\n\n super(IndicatorPeriodData, self).delete(*args, **kwargs)\n\n # In case the status was approved, recalculate the period\n if old_status == self.STATUS_APPROVED_CODE:\n self.period.recalculate_period()\n self.period.update_actual_comment()\n self.period.update_score()\n\n def clean(self):\n \"\"\"\n Perform several checks before we can actually save the update data.\n \"\"\"\n validation_errors = {}\n\n project = self.period.indicator.result.project\n\n # Don't allow a data update to an unpublished project\n if not project.is_published():\n validation_errors['period'] = str(_('Indicator period must be part of a published '\n 'project to add data to it'))\n raise ValidationError(validation_errors)\n\n # Don't allow a data update to a non-Impact project\n if not project.is_impact_project:\n validation_errors['period'] = str(_('Indicator period must be part of an RSR '\n 'Impact project to add data to it'))\n raise ValidationError(validation_errors)\n\n # Don't allow a data update to a locked period\n if self.period.locked:\n validation_errors['period'] = str(_('Indicator period must be unlocked to add '\n 'data to it'))\n raise ValidationError(validation_errors)\n\n # Don't allow a data update to an aggregated parent period with 'percentage' as measurement\n if self.period.indicator.children_aggregate_percentage:\n validation_errors['period'] = str(\n _('Indicator period has an average aggregate of the child projects. Disable '\n 'aggregations to add data to it'))\n raise ValidationError(validation_errors)\n\n if self.pk:\n orig = IndicatorPeriodData.objects.get(pk=self.pk)\n\n # Don't allow for the indicator period to change\n if orig.period != self.period:\n validation_errors['period'] = str(_('Not allowed to change indicator period '\n 'in a data update'))\n\n if self.period.indicator.type == QUANTITATIVE:\n if self.narrative is not None:\n validation_errors['period'] = str(\n _('Narrative field should be empty in quantitative indicators'))\n if self.value is not None:\n try:\n self.value = Decimal(self.value)\n except Exception:\n validation_errors['period'] = str(\n _('Only numeric values are allowed in quantitative indicators'))\n\n if self.period.indicator.type == QUALITATIVE:\n if self.value is not None:\n validation_errors['period'] = str(\n _('Value field should be empty in qualitative indicators'))\n\n if validation_errors:\n raise ValidationError(validation_errors)\n\n @property\n def status_display(self):\n \"\"\"\n Returns the display of the status.\n \"\"\"\n try:\n return dict(self.STATUSES)[self.status].capitalize()\n except KeyError:\n return ''\n\n @property\n def photo_url(self):\n \"\"\"\n Returns the full URL of the photo.\n \"\"\"\n return self.photo.url if self.photo else ''\n\n @property\n def file_url(self):\n \"\"\"\n Returns the full URL of the file.\n \"\"\"\n return self.file.url if self.file else ''\n\n def update_new_value(self):\n \"\"\"Returns a string with the new value.\"\"\"\n try:\n add_up = Decimal(self.value) + Decimal(self.period_actual_value)\n relative = '+' + str(self.value) if self.value >= 0 else str(self.value)\n return \"{} ({})\".format(str(add_up), relative)\n except (InvalidOperation, TypeError):\n return self.value\n\n @classmethod\n def get_user_viewable_updates(cls, queryset, user):\n approved_updates = queryset.filter(status=cls.STATUS_APPROVED_CODE)\n\n if 
user.is_anonymous():\n f_queryset = approved_updates\n\n elif user.is_admin or user.is_superuser:\n f_queryset = queryset\n\n else:\n from akvo.rsr.models import Project\n projects = Project.objects\\\n .filter(results__indicators__periods__data__in=queryset)\\\n .distinct()\n project = projects.first() if projects.count() == 1 else None\n\n # Allow Nuffic users to see all updates, irrespective of what state they are in\n if project is not None and project.in_nuffic_hierarchy() and user.has_perm('rsr.view_project', project):\n f_queryset = queryset\n\n else:\n own_updates = queryset.filter(user=user)\n non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)\n filter_ = user.get_permission_filter(\n 'rsr.view_indicatorperioddata',\n 'period__indicator__result__project__'\n )\n others_updates = non_draft_updates.filter(filter_)\n f_queryset = (\n approved_updates\n | own_updates\n | others_updates\n )\n\n return f_queryset.distinct()\n\n\ndef update_image_path(instance, file_name):\n path = 'db/indicator_period_data/%d/data_photo/%%(instance_pk)s/%%(file_name)s' % instance.update.pk\n return rsr_image_path(instance, file_name, path)\n\n\nclass IndicatorPeriodDataPhoto(models.Model):\n update = models.ForeignKey('IndicatorPeriodData', on_delete=models.CASCADE)\n photo = ImageField(_('photo'), upload_to=update_image_path, max_length=255)\n\n class Meta:\n app_label = 'rsr'\n\n\ndef update_file_path(instance, file_name):\n path = 'db/indicator_period_data/%d/data_file/%%(instance_pk)s/%%(file_name)s' % instance.update.pk\n return rsr_image_path(instance, file_name, path)\n\n\nclass IndicatorPeriodDataFile(models.Model):\n update = models.ForeignKey('IndicatorPeriodData', on_delete=models.CASCADE)\n file = models.FileField(_('file'), upload_to=update_file_path, max_length=255)\n\n class Meta:\n app_label = 'rsr'\n\n\n@receiver(post_save, sender=IndicatorPeriodData)\ndef set_qualitative_narrative(sender, **kwargs):\n \"\"\"Update the narrative field of a qualitative indicator on updates.\"\"\"\n\n # Disable signal handler when loading fixtures\n if kwargs.get('raw', False):\n return\n\n update = kwargs['instance']\n if update.status != IndicatorPeriodData.STATUS_APPROVED_CODE:\n return\n\n if update.period.indicator.type != QUALITATIVE:\n return\n\n # Current update is the latest update?\n if update.period.approved_updates.last().id != update.id:\n return\n\n update.period.narrative = update.narrative\n update.period.save()\n\n\n@receiver(post_save, sender=IndicatorPeriodData)\ndef _send_return_for_revision_email(sender, **kwargs):\n \"\"\"Send email to assigned enumerator when indicator is returned for revision.\"\"\"\n\n # Disable signal handler when loading fixtures\n if kwargs.get('raw', False):\n return\n\n update = kwargs['instance']\n if update.status != IndicatorPeriodData.STATUS_REVISION_CODE:\n return\n\n from akvo.rest.views.project_enumerators import send_return_for_revision_email\n send_return_for_revision_email(update)\n", "path": "akvo/rsr/models/result/indicator_period_data.py"}]} | 3,641 | 437 |
gh_patches_debug_27372 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-4250 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Different mean values of image normalization for HRNet
I find the different configs of HRNet experiments use different mean values for image normalization.
For example,
[fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py) uses the `[102.9801, 115.9465, 122.7717]` as the mean value,
while [fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py) uses the `[103.530, 116.280, 123.675]` as the mean value.
Which one is correct?
--- END ISSUE ---
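Before the configs below, a note on why the exact numbers matter: the `Normalize` step subtracts the per-channel mean and divides by the per-channel std before the image reaches the pretrained backbone, so the mean/std pair has to match the statistics that backbone was trained with. A simplified sketch of the operation (not the mmdetection implementation itself):

```python
import numpy as np

def normalize(img, mean, std):
    # img: H x W x 3 float array; with to_rgb=False the channels stay in BGR order
    mean = np.asarray(mean, dtype=np.float32)
    std = np.asarray(std, dtype=np.float32)
    return (img - mean) / std
```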
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py`
Content:
```
1 _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
2 img_norm_cfg = dict(
3 mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
4 train_pipeline = [
5 dict(type='LoadImageFromFile'),
6 dict(type='LoadAnnotations', with_bbox=True),
7 dict(
8 type='Resize',
9 img_scale=[(1333, 640), (1333, 800)],
10 multiscale_mode='value',
11 keep_ratio=True),
12 dict(type='RandomFlip', flip_ratio=0.5),
13 dict(type='Normalize', **img_norm_cfg),
14 dict(type='Pad', size_divisor=32),
15 dict(type='DefaultFormatBundle'),
16 dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
17 ]
18 test_pipeline = [
19 dict(type='LoadImageFromFile'),
20 dict(
21 type='MultiScaleFlipAug',
22 img_scale=(1333, 800),
23 flip=False,
24 transforms=[
25 dict(type='Resize', keep_ratio=True),
26 dict(type='RandomFlip'),
27 dict(type='Normalize', **img_norm_cfg),
28 dict(type='Pad', size_divisor=32),
29 dict(type='ImageToTensor', keys=['img']),
30 dict(type='Collect', keys=['img']),
31 ])
32 ]
33 data = dict(
34 train=dict(pipeline=train_pipeline),
35 val=dict(pipeline=test_pipeline),
36 test=dict(pipeline=test_pipeline))
37 # learning policy
38 lr_config = dict(step=[16, 22])
39 total_epochs = 24
40
```
Path: `configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py`
Content:
```
1 _base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'
2 model = dict(
3 pretrained='open-mmlab://msra/hrnetv2_w32',
4 backbone=dict(
5 _delete_=True,
6 type='HRNet',
7 extra=dict(
8 stage1=dict(
9 num_modules=1,
10 num_branches=1,
11 block='BOTTLENECK',
12 num_blocks=(4, ),
13 num_channels=(64, )),
14 stage2=dict(
15 num_modules=1,
16 num_branches=2,
17 block='BASIC',
18 num_blocks=(4, 4),
19 num_channels=(32, 64)),
20 stage3=dict(
21 num_modules=4,
22 num_branches=3,
23 block='BASIC',
24 num_blocks=(4, 4, 4),
25 num_channels=(32, 64, 128)),
26 stage4=dict(
27 num_modules=3,
28 num_branches=4,
29 block='BASIC',
30 num_blocks=(4, 4, 4, 4),
31 num_channels=(32, 64, 128, 256)))),
32 neck=dict(
33 _delete_=True,
34 type='HRFPN',
35 in_channels=[32, 64, 128, 256],
36 out_channels=256,
37 stride=2,
38 num_outs=5))
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
--- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
@@ -36,3 +36,34 @@
out_channels=256,
stride=2,
num_outs=5))
+img_norm_cfg = dict(
+ mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+ dict(type='RandomFlip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(1333, 800),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
--- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
@@ -1,6 +1,6 @@
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
img_norm_cfg = dict(
- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
+ mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
| {"golden_diff": "diff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py\n--- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py\n+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py\n@@ -36,3 +36,34 @@\n out_channels=256,\n stride=2,\n num_outs=5))\n+img_norm_cfg = dict(\n+ mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)\n+train_pipeline = [\n+ dict(type='LoadImageFromFile'),\n+ dict(type='LoadAnnotations', with_bbox=True),\n+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n+ dict(type='RandomFlip', flip_ratio=0.5),\n+ dict(type='Normalize', **img_norm_cfg),\n+ dict(type='Pad', size_divisor=32),\n+ dict(type='DefaultFormatBundle'),\n+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n+]\n+test_pipeline = [\n+ dict(type='LoadImageFromFile'),\n+ dict(\n+ type='MultiScaleFlipAug',\n+ img_scale=(1333, 800),\n+ flip=False,\n+ transforms=[\n+ dict(type='Resize', keep_ratio=True),\n+ dict(type='RandomFlip'),\n+ dict(type='Normalize', **img_norm_cfg),\n+ dict(type='Pad', size_divisor=32),\n+ dict(type='ImageToTensor', keys=['img']),\n+ dict(type='Collect', keys=['img']),\n+ ])\n+]\n+data = dict(\n+ train=dict(pipeline=train_pipeline),\n+ val=dict(pipeline=test_pipeline),\n+ test=dict(pipeline=test_pipeline))\ndiff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py\n--- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py\n+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py\n@@ -1,6 +1,6 @@\n _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'\n img_norm_cfg = dict(\n- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\n+ mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)\n train_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n", "issue": "Different mean values of image normalization for HRNet\nI find the different configs of HRNet experiments use different mean values for image normalization.\r\n\r\nFor example,\r\n[fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py) uses the `[102.9801, 115.9465, 122.7717]` as the mean value,\r\nwhile [fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py) uses the `[103.530, 116.280, 123.675]` as the mean value.\r\n\r\nWhich one is correct?\n", "before_files": [{"content": "_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'\nimg_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(1333, 640), (1333, 800)],\n multiscale_mode='value',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n 
dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ndata = dict(\n train=dict(pipeline=train_pipeline),\n val=dict(pipeline=test_pipeline),\n test=dict(pipeline=test_pipeline))\n# learning policy\nlr_config = dict(step=[16, 22])\ntotal_epochs = 24\n", "path": "configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py"}, {"content": "_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'\nmodel = dict(\n pretrained='open-mmlab://msra/hrnetv2_w32',\n backbone=dict(\n _delete_=True,\n type='HRNet',\n extra=dict(\n stage1=dict(\n num_modules=1,\n num_branches=1,\n block='BOTTLENECK',\n num_blocks=(4, ),\n num_channels=(64, )),\n stage2=dict(\n num_modules=1,\n num_branches=2,\n block='BASIC',\n num_blocks=(4, 4),\n num_channels=(32, 64)),\n stage3=dict(\n num_modules=4,\n num_branches=3,\n block='BASIC',\n num_blocks=(4, 4, 4),\n num_channels=(32, 64, 128)),\n stage4=dict(\n num_modules=3,\n num_branches=4,\n block='BASIC',\n num_blocks=(4, 4, 4, 4),\n num_channels=(32, 64, 128, 256)))),\n neck=dict(\n _delete_=True,\n type='HRFPN',\n in_channels=[32, 64, 128, 256],\n out_channels=256,\n stride=2,\n num_outs=5))\n", "path": "configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py"}], "after_files": [{"content": "_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'\nimg_norm_cfg = dict(\n mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(1333, 640), (1333, 800)],\n multiscale_mode='value',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ndata = dict(\n train=dict(pipeline=train_pipeline),\n val=dict(pipeline=test_pipeline),\n test=dict(pipeline=test_pipeline))\n# learning policy\nlr_config = dict(step=[16, 22])\ntotal_epochs = 24\n", "path": "configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py"}, {"content": "_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'\nmodel = dict(\n pretrained='open-mmlab://msra/hrnetv2_w32',\n backbone=dict(\n _delete_=True,\n type='HRNet',\n extra=dict(\n stage1=dict(\n num_modules=1,\n num_branches=1,\n block='BOTTLENECK',\n num_blocks=(4, ),\n num_channels=(64, )),\n stage2=dict(\n num_modules=1,\n num_branches=2,\n block='BASIC',\n num_blocks=(4, 4),\n num_channels=(32, 64)),\n stage3=dict(\n num_modules=4,\n num_branches=3,\n block='BASIC',\n num_blocks=(4, 4, 4),\n num_channels=(32, 64, 128)),\n stage4=dict(\n num_modules=3,\n num_branches=4,\n block='BASIC',\n num_blocks=(4, 4, 4, 4),\n num_channels=(32, 64, 128, 256)))),\n neck=dict(\n _delete_=True,\n type='HRFPN',\n in_channels=[32, 64, 128, 256],\n out_channels=256,\n stride=2,\n num_outs=5))\nimg_norm_cfg = dict(\n mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)\ntrain_pipeline = [\n 
dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ndata = dict(\n train=dict(pipeline=train_pipeline),\n val=dict(pipeline=test_pipeline),\n test=dict(pipeline=test_pipeline))\n", "path": "configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py"}]} | 1,438 | 829 |
gh_patches_debug_45268 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-1001 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Webhook missing: invoice.payment_action_required
I can't find any reference to ```invoice.payment_action_required``` anywhere; however, this is a fairly critical piece of the off-session SCA workflow. Is it simply a case of adding it to the signals list?
https://stripe.com/docs/api/events/types#event_types-invoice.payment_action_required
--- END ISSUE ---
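For orientation: in the file below, `WEBHOOK_SIGNALS` is built from a plain list of event-type strings, one `Signal` per name, so the change the reporter is asking about would look roughly like the sketch here — an illustration of the mechanism, not a confirmed fix or its final placement:

```python
from django.dispatch import Signal

# Hypothetical: register the missing event type the same way the existing
# invoice.* hooks are registered (in the module it would simply be another
# string in the hook list that WEBHOOK_SIGNALS is built from).
WEBHOOK_SIGNALS["invoice.payment_action_required"] = Signal(providing_args=["event"])
```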
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `djstripe/signals.py`
Content:
```
1 """
2 signals are sent for each event Stripe sends to the app
3
4 Stripe docs for Webhooks: https://stripe.com/docs/webhooks
5 """
6 from django.db.models.signals import pre_delete
7 from django.dispatch import Signal, receiver
8
9 from . import settings as djstripe_settings
10
11 webhook_processing_error = Signal(providing_args=["data", "exception"])
12
13 # A signal for each Event type. See https://stripe.com/docs/api/events/types
14
15 WEBHOOK_SIGNALS = dict(
16 [
17 (hook, Signal(providing_args=["event"]))
18 for hook in [
19 "account.updated",
20 "account.application.authorized",
21 "account.application.deauthorized",
22 "account.external_account.created",
23 "account.external_account.deleted",
24 "account.external_account.updated",
25 "application_fee.created",
26 "application_fee.refunded",
27 "application_fee.refund.updated",
28 "balance.available",
29 "charge.captured",
30 "charge.expired",
31 "charge.failed",
32 "charge.pending",
33 "charge.refunded",
34 "charge.succeeded",
35 "charge.updated",
36 "charge.dispute.closed",
37 "charge.dispute.created",
38 "charge.dispute.funds_reinstated",
39 "charge.dispute.funds_withdrawn",
40 "charge.dispute.updated",
41 "charge.refund.updated",
42 "checkout.session.completed",
43 "coupon.created",
44 "coupon.deleted",
45 "coupon.updated",
46 "customer.created",
47 "customer.deleted",
48 "customer.updated",
49 "customer.discount.created",
50 "customer.discount.deleted",
51 "customer.discount.updated",
52 "customer.source.created",
53 "customer.source.deleted",
54 "customer.source.expiring",
55 "customer.source.updated",
56 "customer.subscription.created",
57 "customer.subscription.deleted",
58 "customer.subscription.trial_will_end",
59 "customer.subscription.updated",
60 "file.created",
61 "invoice.created",
62 "invoice.deleted",
63 "invoice.finalized",
64 "invoice.marked_uncollectible",
65 "invoice.payment_failed",
66 "invoice.payment_succeeded",
67 "invoice.sent",
68 "invoice.upcoming",
69 "invoice.updated",
70 "invoice.voided",
71 "invoiceitem.created",
72 "invoiceitem.deleted",
73 "invoiceitem.updated",
74 "issuing_authorization.created",
75 "issuing_authorization.request",
76 "issuing_authorization.updated",
77 "issuing_card.created",
78 "issuing_card.updated",
79 "issuing_cardholder.created",
80 "issuing_cardholder.updated",
81 "issuing_dispute.created",
82 "issuing_dispute.updated",
83 "issuing_settlement.created",
84 "issuing_settlement.updated",
85 "issuing_transaction.created",
86 "issuing_transaction.updated",
87 "order.created",
88 "order.payment_failed",
89 "order.payment_succeeded",
90 "order.updated",
91 "order_return.created",
92 "payment_intent.amount_capturable_updated",
93 "payment_intent.created",
94 "payment_intent.payment_failed",
95 "payment_intent.succeeded",
96 "payment_method.attached",
97 "payment_method.card_automatically_updated",
98 "payment_method.detached",
99 "payment_method.updated",
100 "payout.canceled",
101 "payout.created",
102 "payout.failed",
103 "payout.paid",
104 "payout.updated",
105 "plan.created",
106 "plan.deleted",
107 "plan.updated",
108 "product.created",
109 "product.deleted",
110 "product.updated",
111 "recipient.created",
112 "recipient.deleted",
113 "recipient.updated",
114 "reporting.report_run.failed",
115 "reporting.report_run.succeeded",
116 "reporting.report_type.updated",
117 "review.closed",
118 "review.opened",
119 "setup_intent.created",
120 "setup_intent.setup_failed",
121 "setup_intent.succeeded",
122 "sigma.scheduled_query_run.created",
123 "sku.created",
124 "sku.deleted",
125 "sku.updated",
126 "source.canceled",
127 "source.chargeable",
128 "source.failed",
129 "source.mandate_notification",
130 "source.refund_attributes_required",
131 "source.transaction.created",
132 "source.transaction.updated",
133 "topup.canceled",
134 "topup.created",
135 "topup.failed",
136 "topup.reversed",
137 "topup.succeeded",
138 "transfer.created",
139 "transfer.reversed",
140 "transfer.updated",
141 # deprecated (no longer in events_types list) - TODO can be deleted?
142 "checkout_beta.session_succeeded",
143 "issuer_fraud_record.created",
144 "payment_intent.requires_capture",
145 "subscription_schedule.canceled",
146 "subscription_schedule.completed",
147 "subscription_schedule.created",
148 "subscription_schedule.released",
149 "subscription_schedule.updated",
150 # special case? - TODO can be deleted?
151 "ping",
152 ]
153 ]
154 )
155
156
157 @receiver(pre_delete, sender=djstripe_settings.get_subscriber_model_string())
158 def on_delete_subscriber_purge_customer(instance=None, **kwargs):
159 """ Purge associated customers when the subscriber is deleted. """
160 for customer in instance.djstripe_customers.all():
161 customer.purge()
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/djstripe/signals.py b/djstripe/signals.py
--- a/djstripe/signals.py
+++ b/djstripe/signals.py
@@ -16,36 +16,41 @@
[
(hook, Signal(providing_args=["event"]))
for hook in [
- "account.updated",
+ # Update this by copy-pasting the "enabled_events" enum values from
+ # https://raw.githubusercontent.com/stripe/openapi/master/openapi/spec3.json
"account.application.authorized",
"account.application.deauthorized",
"account.external_account.created",
"account.external_account.deleted",
"account.external_account.updated",
+ "account.updated",
"application_fee.created",
- "application_fee.refunded",
"application_fee.refund.updated",
+ "application_fee.refunded",
"balance.available",
+ "capability.updated",
"charge.captured",
- "charge.expired",
- "charge.failed",
- "charge.pending",
- "charge.refunded",
- "charge.succeeded",
- "charge.updated",
"charge.dispute.closed",
"charge.dispute.created",
"charge.dispute.funds_reinstated",
"charge.dispute.funds_withdrawn",
"charge.dispute.updated",
+ "charge.expired",
+ "charge.failed",
+ "charge.pending",
"charge.refund.updated",
+ "charge.refunded",
+ "charge.succeeded",
+ "charge.updated",
"checkout.session.completed",
"coupon.created",
"coupon.deleted",
"coupon.updated",
+ "credit_note.created",
+ "credit_note.updated",
+ "credit_note.voided",
"customer.created",
"customer.deleted",
- "customer.updated",
"customer.discount.created",
"customer.discount.deleted",
"customer.discount.updated",
@@ -57,11 +62,16 @@
"customer.subscription.deleted",
"customer.subscription.trial_will_end",
"customer.subscription.updated",
+ "customer.tax_id.created",
+ "customer.tax_id.deleted",
+ "customer.tax_id.updated",
+ "customer.updated",
"file.created",
"invoice.created",
"invoice.deleted",
"invoice.finalized",
"invoice.marked_uncollectible",
+ "invoice.payment_action_required",
"invoice.payment_failed",
"invoice.payment_succeeded",
"invoice.sent",
@@ -102,12 +112,17 @@
"payout.failed",
"payout.paid",
"payout.updated",
+ "person.created",
+ "person.deleted",
+ "person.updated",
"plan.created",
"plan.deleted",
"plan.updated",
"product.created",
"product.deleted",
"product.updated",
+ "radar.early_fraud_warning.created",
+ "radar.early_fraud_warning.updated",
"recipient.created",
"recipient.deleted",
"recipient.updated",
@@ -130,12 +145,23 @@
"source.refund_attributes_required",
"source.transaction.created",
"source.transaction.updated",
+ "subscription_schedule.aborted",
+ "subscription_schedule.canceled",
+ "subscription_schedule.completed",
+ "subscription_schedule.created",
+ "subscription_schedule.expiring",
+ "subscription_schedule.released",
+ "subscription_schedule.updated",
+ "tax_rate.created",
+ "tax_rate.updated",
"topup.canceled",
"topup.created",
"topup.failed",
"topup.reversed",
"topup.succeeded",
"transfer.created",
+ "transfer.failed",
+ "transfer.paid",
"transfer.reversed",
"transfer.updated",
# deprecated (no longer in events_types list) - TODO can be deleted?
| {"golden_diff": "diff --git a/djstripe/signals.py b/djstripe/signals.py\n--- a/djstripe/signals.py\n+++ b/djstripe/signals.py\n@@ -16,36 +16,41 @@\n [\n (hook, Signal(providing_args=[\"event\"]))\n for hook in [\n- \"account.updated\",\n+ # Update this by copy-pasting the \"enabled_events\" enum values from\n+ # https://raw.githubusercontent.com/stripe/openapi/master/openapi/spec3.json\n \"account.application.authorized\",\n \"account.application.deauthorized\",\n \"account.external_account.created\",\n \"account.external_account.deleted\",\n \"account.external_account.updated\",\n+ \"account.updated\",\n \"application_fee.created\",\n- \"application_fee.refunded\",\n \"application_fee.refund.updated\",\n+ \"application_fee.refunded\",\n \"balance.available\",\n+ \"capability.updated\",\n \"charge.captured\",\n- \"charge.expired\",\n- \"charge.failed\",\n- \"charge.pending\",\n- \"charge.refunded\",\n- \"charge.succeeded\",\n- \"charge.updated\",\n \"charge.dispute.closed\",\n \"charge.dispute.created\",\n \"charge.dispute.funds_reinstated\",\n \"charge.dispute.funds_withdrawn\",\n \"charge.dispute.updated\",\n+ \"charge.expired\",\n+ \"charge.failed\",\n+ \"charge.pending\",\n \"charge.refund.updated\",\n+ \"charge.refunded\",\n+ \"charge.succeeded\",\n+ \"charge.updated\",\n \"checkout.session.completed\",\n \"coupon.created\",\n \"coupon.deleted\",\n \"coupon.updated\",\n+ \"credit_note.created\",\n+ \"credit_note.updated\",\n+ \"credit_note.voided\",\n \"customer.created\",\n \"customer.deleted\",\n- \"customer.updated\",\n \"customer.discount.created\",\n \"customer.discount.deleted\",\n \"customer.discount.updated\",\n@@ -57,11 +62,16 @@\n \"customer.subscription.deleted\",\n \"customer.subscription.trial_will_end\",\n \"customer.subscription.updated\",\n+ \"customer.tax_id.created\",\n+ \"customer.tax_id.deleted\",\n+ \"customer.tax_id.updated\",\n+ \"customer.updated\",\n \"file.created\",\n \"invoice.created\",\n \"invoice.deleted\",\n \"invoice.finalized\",\n \"invoice.marked_uncollectible\",\n+ \"invoice.payment_action_required\",\n \"invoice.payment_failed\",\n \"invoice.payment_succeeded\",\n \"invoice.sent\",\n@@ -102,12 +112,17 @@\n \"payout.failed\",\n \"payout.paid\",\n \"payout.updated\",\n+ \"person.created\",\n+ \"person.deleted\",\n+ \"person.updated\",\n \"plan.created\",\n \"plan.deleted\",\n \"plan.updated\",\n \"product.created\",\n \"product.deleted\",\n \"product.updated\",\n+ \"radar.early_fraud_warning.created\",\n+ \"radar.early_fraud_warning.updated\",\n \"recipient.created\",\n \"recipient.deleted\",\n \"recipient.updated\",\n@@ -130,12 +145,23 @@\n \"source.refund_attributes_required\",\n \"source.transaction.created\",\n \"source.transaction.updated\",\n+ \"subscription_schedule.aborted\",\n+ \"subscription_schedule.canceled\",\n+ \"subscription_schedule.completed\",\n+ \"subscription_schedule.created\",\n+ \"subscription_schedule.expiring\",\n+ \"subscription_schedule.released\",\n+ \"subscription_schedule.updated\",\n+ \"tax_rate.created\",\n+ \"tax_rate.updated\",\n \"topup.canceled\",\n \"topup.created\",\n \"topup.failed\",\n \"topup.reversed\",\n \"topup.succeeded\",\n \"transfer.created\",\n+ \"transfer.failed\",\n+ \"transfer.paid\",\n \"transfer.reversed\",\n \"transfer.updated\",\n # deprecated (no longer in events_types list) - TODO can be deleted?\n", "issue": "Webhook missing: invoice.payment_action_required\nI can't find any reference to ```invoice.payment_action_required``` anywhere, however this is a fairly critical piece of off-session 
SCA workflow. Is it simply a case of adding it to the signals list?\r\n\r\nhttps://stripe.com/docs/api/events/types#event_types-invoice.payment_action_required\n", "before_files": [{"content": "\"\"\"\nsignals are sent for each event Stripe sends to the app\n\nStripe docs for Webhooks: https://stripe.com/docs/webhooks\n\"\"\"\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import Signal, receiver\n\nfrom . import settings as djstripe_settings\n\nwebhook_processing_error = Signal(providing_args=[\"data\", \"exception\"])\n\n# A signal for each Event type. See https://stripe.com/docs/api/events/types\n\nWEBHOOK_SIGNALS = dict(\n [\n (hook, Signal(providing_args=[\"event\"]))\n for hook in [\n \"account.updated\",\n \"account.application.authorized\",\n \"account.application.deauthorized\",\n \"account.external_account.created\",\n \"account.external_account.deleted\",\n \"account.external_account.updated\",\n \"application_fee.created\",\n \"application_fee.refunded\",\n \"application_fee.refund.updated\",\n \"balance.available\",\n \"charge.captured\",\n \"charge.expired\",\n \"charge.failed\",\n \"charge.pending\",\n \"charge.refunded\",\n \"charge.succeeded\",\n \"charge.updated\",\n \"charge.dispute.closed\",\n \"charge.dispute.created\",\n \"charge.dispute.funds_reinstated\",\n \"charge.dispute.funds_withdrawn\",\n \"charge.dispute.updated\",\n \"charge.refund.updated\",\n \"checkout.session.completed\",\n \"coupon.created\",\n \"coupon.deleted\",\n \"coupon.updated\",\n \"customer.created\",\n \"customer.deleted\",\n \"customer.updated\",\n \"customer.discount.created\",\n \"customer.discount.deleted\",\n \"customer.discount.updated\",\n \"customer.source.created\",\n \"customer.source.deleted\",\n \"customer.source.expiring\",\n \"customer.source.updated\",\n \"customer.subscription.created\",\n \"customer.subscription.deleted\",\n \"customer.subscription.trial_will_end\",\n \"customer.subscription.updated\",\n \"file.created\",\n \"invoice.created\",\n \"invoice.deleted\",\n \"invoice.finalized\",\n \"invoice.marked_uncollectible\",\n \"invoice.payment_failed\",\n \"invoice.payment_succeeded\",\n \"invoice.sent\",\n \"invoice.upcoming\",\n \"invoice.updated\",\n \"invoice.voided\",\n \"invoiceitem.created\",\n \"invoiceitem.deleted\",\n \"invoiceitem.updated\",\n \"issuing_authorization.created\",\n \"issuing_authorization.request\",\n \"issuing_authorization.updated\",\n \"issuing_card.created\",\n \"issuing_card.updated\",\n \"issuing_cardholder.created\",\n \"issuing_cardholder.updated\",\n \"issuing_dispute.created\",\n \"issuing_dispute.updated\",\n \"issuing_settlement.created\",\n \"issuing_settlement.updated\",\n \"issuing_transaction.created\",\n \"issuing_transaction.updated\",\n \"order.created\",\n \"order.payment_failed\",\n \"order.payment_succeeded\",\n \"order.updated\",\n \"order_return.created\",\n \"payment_intent.amount_capturable_updated\",\n \"payment_intent.created\",\n \"payment_intent.payment_failed\",\n \"payment_intent.succeeded\",\n \"payment_method.attached\",\n \"payment_method.card_automatically_updated\",\n \"payment_method.detached\",\n \"payment_method.updated\",\n \"payout.canceled\",\n \"payout.created\",\n \"payout.failed\",\n \"payout.paid\",\n \"payout.updated\",\n \"plan.created\",\n \"plan.deleted\",\n \"plan.updated\",\n \"product.created\",\n \"product.deleted\",\n \"product.updated\",\n \"recipient.created\",\n \"recipient.deleted\",\n \"recipient.updated\",\n \"reporting.report_run.failed\",\n 
\"reporting.report_run.succeeded\",\n \"reporting.report_type.updated\",\n \"review.closed\",\n \"review.opened\",\n \"setup_intent.created\",\n \"setup_intent.setup_failed\",\n \"setup_intent.succeeded\",\n \"sigma.scheduled_query_run.created\",\n \"sku.created\",\n \"sku.deleted\",\n \"sku.updated\",\n \"source.canceled\",\n \"source.chargeable\",\n \"source.failed\",\n \"source.mandate_notification\",\n \"source.refund_attributes_required\",\n \"source.transaction.created\",\n \"source.transaction.updated\",\n \"topup.canceled\",\n \"topup.created\",\n \"topup.failed\",\n \"topup.reversed\",\n \"topup.succeeded\",\n \"transfer.created\",\n \"transfer.reversed\",\n \"transfer.updated\",\n # deprecated (no longer in events_types list) - TODO can be deleted?\n \"checkout_beta.session_succeeded\",\n \"issuer_fraud_record.created\",\n \"payment_intent.requires_capture\",\n \"subscription_schedule.canceled\",\n \"subscription_schedule.completed\",\n \"subscription_schedule.created\",\n \"subscription_schedule.released\",\n \"subscription_schedule.updated\",\n # special case? - TODO can be deleted?\n \"ping\",\n ]\n ]\n)\n\n\n@receiver(pre_delete, sender=djstripe_settings.get_subscriber_model_string())\ndef on_delete_subscriber_purge_customer(instance=None, **kwargs):\n \"\"\" Purge associated customers when the subscriber is deleted. \"\"\"\n for customer in instance.djstripe_customers.all():\n customer.purge()\n", "path": "djstripe/signals.py"}], "after_files": [{"content": "\"\"\"\nsignals are sent for each event Stripe sends to the app\n\nStripe docs for Webhooks: https://stripe.com/docs/webhooks\n\"\"\"\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import Signal, receiver\n\nfrom . import settings as djstripe_settings\n\nwebhook_processing_error = Signal(providing_args=[\"data\", \"exception\"])\n\n# A signal for each Event type. 
See https://stripe.com/docs/api/events/types\n\nWEBHOOK_SIGNALS = dict(\n [\n (hook, Signal(providing_args=[\"event\"]))\n for hook in [\n # Update this by copy-pasting the \"enabled_events\" enum values from\n # https://raw.githubusercontent.com/stripe/openapi/master/openapi/spec3.json\n \"account.application.authorized\",\n \"account.application.deauthorized\",\n \"account.external_account.created\",\n \"account.external_account.deleted\",\n \"account.external_account.updated\",\n \"account.updated\",\n \"application_fee.created\",\n \"application_fee.refund.updated\",\n \"application_fee.refunded\",\n \"balance.available\",\n \"capability.updated\",\n \"charge.captured\",\n \"charge.dispute.closed\",\n \"charge.dispute.created\",\n \"charge.dispute.funds_reinstated\",\n \"charge.dispute.funds_withdrawn\",\n \"charge.dispute.updated\",\n \"charge.expired\",\n \"charge.failed\",\n \"charge.pending\",\n \"charge.refund.updated\",\n \"charge.refunded\",\n \"charge.succeeded\",\n \"charge.updated\",\n \"checkout.session.completed\",\n \"coupon.created\",\n \"coupon.deleted\",\n \"coupon.updated\",\n \"credit_note.created\",\n \"credit_note.updated\",\n \"credit_note.voided\",\n \"customer.created\",\n \"customer.deleted\",\n \"customer.discount.created\",\n \"customer.discount.deleted\",\n \"customer.discount.updated\",\n \"customer.source.created\",\n \"customer.source.deleted\",\n \"customer.source.expiring\",\n \"customer.source.updated\",\n \"customer.subscription.created\",\n \"customer.subscription.deleted\",\n \"customer.subscription.trial_will_end\",\n \"customer.subscription.updated\",\n \"customer.tax_id.created\",\n \"customer.tax_id.deleted\",\n \"customer.tax_id.updated\",\n \"customer.updated\",\n \"file.created\",\n \"invoice.created\",\n \"invoice.deleted\",\n \"invoice.finalized\",\n \"invoice.marked_uncollectible\",\n \"invoice.payment_action_required\",\n \"invoice.payment_failed\",\n \"invoice.payment_succeeded\",\n \"invoice.sent\",\n \"invoice.upcoming\",\n \"invoice.updated\",\n \"invoice.voided\",\n \"invoiceitem.created\",\n \"invoiceitem.deleted\",\n \"invoiceitem.updated\",\n \"issuing_authorization.created\",\n \"issuing_authorization.request\",\n \"issuing_authorization.updated\",\n \"issuing_card.created\",\n \"issuing_card.updated\",\n \"issuing_cardholder.created\",\n \"issuing_cardholder.updated\",\n \"issuing_dispute.created\",\n \"issuing_dispute.updated\",\n \"issuing_settlement.created\",\n \"issuing_settlement.updated\",\n \"issuing_transaction.created\",\n \"issuing_transaction.updated\",\n \"order.created\",\n \"order.payment_failed\",\n \"order.payment_succeeded\",\n \"order.updated\",\n \"order_return.created\",\n \"payment_intent.amount_capturable_updated\",\n \"payment_intent.created\",\n \"payment_intent.payment_failed\",\n \"payment_intent.succeeded\",\n \"payment_method.attached\",\n \"payment_method.card_automatically_updated\",\n \"payment_method.detached\",\n \"payment_method.updated\",\n \"payout.canceled\",\n \"payout.created\",\n \"payout.failed\",\n \"payout.paid\",\n \"payout.updated\",\n \"person.created\",\n \"person.deleted\",\n \"person.updated\",\n \"plan.created\",\n \"plan.deleted\",\n \"plan.updated\",\n \"product.created\",\n \"product.deleted\",\n \"product.updated\",\n \"radar.early_fraud_warning.created\",\n \"radar.early_fraud_warning.updated\",\n \"recipient.created\",\n \"recipient.deleted\",\n \"recipient.updated\",\n \"reporting.report_run.failed\",\n \"reporting.report_run.succeeded\",\n 
\"reporting.report_type.updated\",\n \"review.closed\",\n \"review.opened\",\n \"setup_intent.created\",\n \"setup_intent.setup_failed\",\n \"setup_intent.succeeded\",\n \"sigma.scheduled_query_run.created\",\n \"sku.created\",\n \"sku.deleted\",\n \"sku.updated\",\n \"source.canceled\",\n \"source.chargeable\",\n \"source.failed\",\n \"source.mandate_notification\",\n \"source.refund_attributes_required\",\n \"source.transaction.created\",\n \"source.transaction.updated\",\n \"subscription_schedule.aborted\",\n \"subscription_schedule.canceled\",\n \"subscription_schedule.completed\",\n \"subscription_schedule.created\",\n \"subscription_schedule.expiring\",\n \"subscription_schedule.released\",\n \"subscription_schedule.updated\",\n \"tax_rate.created\",\n \"tax_rate.updated\",\n \"topup.canceled\",\n \"topup.created\",\n \"topup.failed\",\n \"topup.reversed\",\n \"topup.succeeded\",\n \"transfer.created\",\n \"transfer.failed\",\n \"transfer.paid\",\n \"transfer.reversed\",\n \"transfer.updated\",\n # deprecated (no longer in events_types list) - TODO can be deleted?\n \"checkout_beta.session_succeeded\",\n \"issuer_fraud_record.created\",\n \"payment_intent.requires_capture\",\n \"subscription_schedule.canceled\",\n \"subscription_schedule.completed\",\n \"subscription_schedule.created\",\n \"subscription_schedule.released\",\n \"subscription_schedule.updated\",\n # special case? - TODO can be deleted?\n \"ping\",\n ]\n ]\n)\n\n\n@receiver(pre_delete, sender=djstripe_settings.get_subscriber_model_string())\ndef on_delete_subscriber_purge_customer(instance=None, **kwargs):\n \"\"\" Purge associated customers when the subscriber is deleted. \"\"\"\n for customer in instance.djstripe_customers.all():\n customer.purge()\n", "path": "djstripe/signals.py"}]} | 1,765 | 832 |
gh_patches_debug_11607 | rasdani/github-patches | git_diff | scikit-image__scikit-image-5271 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Question: Why is the default structuring element of 1-connectivity used by ndimage.label for getting the default markers (labeled local minima) for watershed segmentation?
## Description
In the implemented watershed.py segmentation, one can pass a connectivity argument. When `local_minima` is called by watershed (when `markers=None`), it passes the same connectivity value on to the local minima function; this generates `markers_bool`, which is then used by `ndimage.label()` to generate the labeled markers. `ndimage.label` can take a structure argument and defaults to a 1-connectivity structure if none is given. IMO it is more intuitive to have `ndimage.label` also use the same connectivity that the user passes to the watershed function, instead of always using the default 1-connectivity.
Below is the line in the source I am referring to:
https://github.com/scikit-image/scikit-image/blob/663e9e4b0f2ba88249bfef45bc65700555a3f24b/skimage/segmentation/_watershed.py#L76
Instead of the above, would it not be better to have something like the following?
```python
if connectivity==2:
markers = ndi.label(markers_bool,
structure = [[1,1,1],
[1,1,1],
[1,1,1]])[0]
```
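For what it's worth, the same idea can be written without hard-coding a 3×3 structure, so it also works for 3-D images — a sketch using `scipy.ndimage.generate_binary_structure`, reusing the `markers_bool` and `connectivity` variables from the snippet above:

```python
from scipy import ndimage as ndi

# Build a structuring element that matches the user-supplied connectivity
footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)
markers = ndi.label(markers_bool, structure=footprint)[0]
```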
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/segmentation/_watershed.py`
Content:
```
1 """watershed.py - watershed algorithm
2
3 This module implements a watershed algorithm that apportions pixels into
4 marked basins. The algorithm uses a priority queue to hold the pixels
5 with the metric for the priority queue being pixel value, then the time
6 of entry into the queue - this settles ties in favor of the closest marker.
7
8 Some ideas taken from
9 Soille, "Automated Basin Delineation from Digital Elevation Models Using
10 Mathematical Morphology", Signal Processing 20 (1990) 171-182.
11
12 The most important insight in the paper is that entry time onto the queue
13 solves two problems: a pixel should be assigned to the neighbor with the
14 largest gradient or, if there is no gradient, pixels on a plateau should
15 be split between markers on opposite sides.
16
17 Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
18 Website: http://www.cellprofiler.org
19
20 Copyright (c) 2003-2009 Massachusetts Institute of Technology
21 Copyright (c) 2009-2011 Broad Institute
22 All rights reserved.
23
24 Original author: Lee Kamentsky
25 """
26
27 import numpy as np
28 from scipy import ndimage as ndi
29
30 from . import _watershed_cy
31 from ..morphology.extrema import local_minima
32 from ..morphology._util import (_validate_connectivity,
33 _offsets_to_raveled_neighbors)
34 from ..util import crop, regular_seeds
35
36
37 def _validate_inputs(image, markers, mask, connectivity):
38 """Ensure that all inputs to watershed have matching shapes and types.
39
40 Parameters
41 ----------
42 image : array
43 The input image.
44 markers : int or array of int
45 The marker image.
46 mask : array, or None
47 A boolean mask, True where we want to compute the watershed.
48 connectivity : int in {1, ..., image.ndim}
49 The connectivity of the neighborhood of a pixel.
50
51 Returns
52 -------
53 image, markers, mask : arrays
54 The validated and formatted arrays. Image will have dtype float64,
55 markers int32, and mask int8. If ``None`` was given for the mask,
56 it is a volume of all 1s.
57
58 Raises
59 ------
60 ValueError
61 If the shapes of the given arrays don't match.
62 """
63 n_pixels = image.size
64 if mask is None:
65 # Use a complete `True` mask if none is provided
66 mask = np.ones(image.shape, bool)
67 else:
68 mask = np.asanyarray(mask, dtype=bool)
69 n_pixels = np.sum(mask)
70 if mask.shape != image.shape:
71 message = ("`mask` (shape {}) must have same shape as "
72 "`image` (shape {})".format(mask.shape, image.shape))
73 raise ValueError(message)
74 if markers is None:
75 markers_bool = local_minima(image, connectivity=connectivity) * mask
76 markers = ndi.label(markers_bool)[0]
77 elif not isinstance(markers, (np.ndarray, list, tuple)):
78 # not array-like, assume int
79 # given int, assume that number of markers *within mask*.
80 markers = regular_seeds(image.shape,
81 int(markers / (n_pixels / image.size)))
82 markers *= mask
83 else:
84 markers = np.asanyarray(markers) * mask
85 if markers.shape != image.shape:
86 message = ("`markers` (shape {}) must have same shape as "
87 "`image` (shape {})".format(markers.shape, image.shape))
88 raise ValueError(message)
89 return (image.astype(np.float64),
90 markers.astype(np.int32),
91 mask.astype(np.int8))
92
93
94 def watershed(image, markers=None, connectivity=1, offset=None, mask=None,
95 compactness=0, watershed_line=False):
96 """Find watershed basins in `image` flooded from given `markers`.
97
98 Parameters
99 ----------
100 image : ndarray (2-D, 3-D, ...) of integers
101 Data array where the lowest value points are labeled first.
102 markers : int, or ndarray of int, same shape as `image`, optional
103 The desired number of markers, or an array marking the basins with the
104 values to be assigned in the label matrix. Zero means not a marker. If
105 ``None`` (no markers given), the local minima of the image are used as
106 markers.
107 connectivity : ndarray, optional
108 An array with the same number of dimensions as `image` whose
109 non-zero elements indicate neighbors for connection.
110 Following the scipy convention, default is a one-connected array of
111 the dimension of the image.
112 offset : array_like of shape image.ndim, optional
113 offset of the connectivity (one offset per dimension)
114 mask : ndarray of bools or 0s and 1s, optional
115 Array of same shape as `image`. Only points at which mask == True
116 will be labeled.
117 compactness : float, optional
118 Use compact watershed [3]_ with given compactness parameter.
119 Higher values result in more regularly-shaped watershed basins.
120 watershed_line : bool, optional
121 If watershed_line is True, a one-pixel wide line separates the regions
122 obtained by the watershed algorithm. The line has the label 0.
123
124 Returns
125 -------
126 out : ndarray
127 A labeled matrix of the same type and shape as markers
128
129 See Also
130 --------
131 skimage.segmentation.random_walker : random walker segmentation
132 A segmentation algorithm based on anisotropic diffusion, usually
133 slower than the watershed but with good results on noisy data and
134 boundaries with holes.
135
136 Notes
137 -----
138 This function implements a watershed algorithm [1]_ [2]_ that apportions
139 pixels into marked basins. The algorithm uses a priority queue to hold
140 the pixels with the metric for the priority queue being pixel value, then
141 the time of entry into the queue - this settles ties in favor of the
142 closest marker.
143
144 Some ideas taken from
145 Soille, "Automated Basin Delineation from Digital Elevation Models Using
146 Mathematical Morphology", Signal Processing 20 (1990) 171-182
147
148 The most important insight in the paper is that entry time onto the queue
149 solves two problems: a pixel should be assigned to the neighbor with the
150 largest gradient or, if there is no gradient, pixels on a plateau should
151 be split between markers on opposite sides.
152
153 This implementation converts all arguments to specific, lowest common
154 denominator types, then passes these to a C algorithm.
155
156 Markers can be determined manually, or automatically using for example
157 the local minima of the gradient of the image, or the local maxima of the
158 distance function to the background for separating overlapping objects
159 (see example).
160
161 References
162 ----------
163 .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29
164
165 .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html
166
167 .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and
168 Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation
169 Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`
170 https://www.tu-chemnitz.de/etit/proaut/publications/cws_pSLIC_ICPR.pdf
171
172 Examples
173 --------
174 The watershed algorithm is useful to separate overlapping objects.
175
176 We first generate an initial image with two overlapping circles:
177
178 >>> x, y = np.indices((80, 80))
179 >>> x1, y1, x2, y2 = 28, 28, 44, 52
180 >>> r1, r2 = 16, 20
181 >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
182 >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
183 >>> image = np.logical_or(mask_circle1, mask_circle2)
184
185 Next, we want to separate the two circles. We generate markers at the
186 maxima of the distance to the background:
187
188 >>> from scipy import ndimage as ndi
189 >>> distance = ndi.distance_transform_edt(image)
190 >>> from skimage.feature import peak_local_max
191 >>> local_maxi = peak_local_max(distance, labels=image,
192 ... footprint=np.ones((3, 3)),
193 ... indices=False)
194 >>> markers = ndi.label(local_maxi)[0]
195
196 Finally, we run the watershed on the image and markers:
197
198 >>> labels = watershed(-distance, markers, mask=image)
199
200 The algorithm works also for 3-D images, and can be used for example to
201 separate overlapping spheres.
202 """
203 image, markers, mask = _validate_inputs(image, markers, mask, connectivity)
204 connectivity, offset = _validate_connectivity(image.ndim, connectivity,
205 offset)
206
207 # pad the image, markers, and mask so that we can use the mask to
208 # keep from running off the edges
209 pad_width = [(p, p) for p in offset]
210 image = np.pad(image, pad_width, mode='constant')
211 mask = np.pad(mask, pad_width, mode='constant').ravel()
212 output = np.pad(markers, pad_width, mode='constant')
213
214 flat_neighborhood = _offsets_to_raveled_neighbors(
215 image.shape, connectivity, center=offset)
216 marker_locations = np.flatnonzero(output)
217 image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize
218
219 _watershed_cy.watershed_raveled(image.ravel(),
220 marker_locations, flat_neighborhood,
221 mask, image_strides, compactness,
222 output.ravel(),
223 watershed_line)
224
225 output = crop(output, pad_width, copy=True)
226
227 return output
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/segmentation/_watershed.py b/skimage/segmentation/_watershed.py
--- a/skimage/segmentation/_watershed.py
+++ b/skimage/segmentation/_watershed.py
@@ -73,7 +73,8 @@
raise ValueError(message)
if markers is None:
markers_bool = local_minima(image, connectivity=connectivity) * mask
- markers = ndi.label(markers_bool)[0]
+ footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)
+ markers = ndi.label(markers_bool, structure=footprint)[0]
elif not isinstance(markers, (np.ndarray, list, tuple)):
# not array-like, assume int
# given int, assume that number of markers *within mask*.
| {"golden_diff": "diff --git a/skimage/segmentation/_watershed.py b/skimage/segmentation/_watershed.py\n--- a/skimage/segmentation/_watershed.py\n+++ b/skimage/segmentation/_watershed.py\n@@ -73,7 +73,8 @@\n raise ValueError(message)\n if markers is None:\n markers_bool = local_minima(image, connectivity=connectivity) * mask\n- markers = ndi.label(markers_bool)[0]\n+ footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)\n+ markers = ndi.label(markers_bool, structure=footprint)[0]\n elif not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n # given int, assume that number of markers *within mask*.\n", "issue": "Question: Why is default structuring element of 1-connectivity used by ndimage.label for getting the default markers(labeled local minima) for watershed segmentation?\n## Description\r\n\r\nIn the implemented watershed.py segmentation, one can pass a connectivity argument, when `local_minima` is called by watershed (when `markers=none`) it passes the same connectivity value onto the local minima function, this generates the `markers_bool`, which then is used by `ndimage.label()` to generate the labeled markers. `ndimage.label` can take a structure argument and it defaults to 1-connectivity structure if none are given. IMO it is more intuitive to have `ndimage.label` also use the same connectivity which is passed by the user to the watershed function instead of always using the default 1-connectivity.\r\nBelow is the line in the source I am referring to:\r\nhttps://github.com/scikit-image/scikit-image/blob/663e9e4b0f2ba88249bfef45bc65700555a3f24b/skimage/segmentation/_watershed.py#L76\r\n\r\nInstead of the above would it not be better to have?\r\n```python\r\nif connectivity==2:\r\n markers = ndi.label(markers_bool,\r\n structure = [[1,1,1],\r\n [1,1,1],\r\n [1,1,1]])[0]\r\n```\r\n\n", "before_files": [{"content": "\"\"\"watershed.py - watershed algorithm\n\nThis module implements a watershed algorithm that apportions pixels into\nmarked basins. The algorithm uses a priority queue to hold the pixels\nwith the metric for the priority queue being pixel value, then the time\nof entry into the queue - this settles ties in favor of the closest marker.\n\nSome ideas taken from\nSoille, \"Automated Basin Delineation from Digital Elevation Models Using\nMathematical Morphology\", Signal Processing 20 (1990) 171-182.\n\nThe most important insight in the paper is that entry time onto the queue\nsolves two problems: a pixel should be assigned to the neighbor with the\nlargest gradient or, if there is no gradient, pixels on a plateau should\nbe split between markers on opposite sides.\n\nOriginally part of CellProfiler, code licensed under both GPL and BSD licenses.\nWebsite: http://www.cellprofiler.org\n\nCopyright (c) 2003-2009 Massachusetts Institute of Technology\nCopyright (c) 2009-2011 Broad Institute\nAll rights reserved.\n\nOriginal author: Lee Kamentsky\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom . 
import _watershed_cy\nfrom ..morphology.extrema import local_minima\nfrom ..morphology._util import (_validate_connectivity,\n _offsets_to_raveled_neighbors)\nfrom ..util import crop, regular_seeds\n\n\ndef _validate_inputs(image, markers, mask, connectivity):\n \"\"\"Ensure that all inputs to watershed have matching shapes and types.\n\n Parameters\n ----------\n image : array\n The input image.\n markers : int or array of int\n The marker image.\n mask : array, or None\n A boolean mask, True where we want to compute the watershed.\n connectivity : int in {1, ..., image.ndim}\n The connectivity of the neighborhood of a pixel.\n\n Returns\n -------\n image, markers, mask : arrays\n The validated and formatted arrays. Image will have dtype float64,\n markers int32, and mask int8. If ``None`` was given for the mask,\n it is a volume of all 1s.\n\n Raises\n ------\n ValueError\n If the shapes of the given arrays don't match.\n \"\"\"\n n_pixels = image.size\n if mask is None:\n # Use a complete `True` mask if none is provided\n mask = np.ones(image.shape, bool)\n else:\n mask = np.asanyarray(mask, dtype=bool)\n n_pixels = np.sum(mask)\n if mask.shape != image.shape:\n message = (\"`mask` (shape {}) must have same shape as \"\n \"`image` (shape {})\".format(mask.shape, image.shape))\n raise ValueError(message)\n if markers is None:\n markers_bool = local_minima(image, connectivity=connectivity) * mask\n markers = ndi.label(markers_bool)[0]\n elif not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n # given int, assume that number of markers *within mask*.\n markers = regular_seeds(image.shape,\n int(markers / (n_pixels / image.size)))\n markers *= mask\n else:\n markers = np.asanyarray(markers) * mask\n if markers.shape != image.shape:\n message = (\"`markers` (shape {}) must have same shape as \"\n \"`image` (shape {})\".format(markers.shape, image.shape))\n raise ValueError(message)\n return (image.astype(np.float64),\n markers.astype(np.int32),\n mask.astype(np.int8))\n\n\ndef watershed(image, markers=None, connectivity=1, offset=None, mask=None,\n compactness=0, watershed_line=False):\n \"\"\"Find watershed basins in `image` flooded from given `markers`.\n\n Parameters\n ----------\n image : ndarray (2-D, 3-D, ...) of integers\n Data array where the lowest value points are labeled first.\n markers : int, or ndarray of int, same shape as `image`, optional\n The desired number of markers, or an array marking the basins with the\n values to be assigned in the label matrix. Zero means not a marker. If\n ``None`` (no markers given), the local minima of the image are used as\n markers.\n connectivity : ndarray, optional\n An array with the same number of dimensions as `image` whose\n non-zero elements indicate neighbors for connection.\n Following the scipy convention, default is a one-connected array of\n the dimension of the image.\n offset : array_like of shape image.ndim, optional\n offset of the connectivity (one offset per dimension)\n mask : ndarray of bools or 0s and 1s, optional\n Array of same shape as `image`. Only points at which mask == True\n will be labeled.\n compactness : float, optional\n Use compact watershed [3]_ with given compactness parameter.\n Higher values result in more regularly-shaped watershed basins.\n watershed_line : bool, optional\n If watershed_line is True, a one-pixel wide line separates the regions\n obtained by the watershed algorithm. 
The line has the label 0.\n\n Returns\n -------\n out : ndarray\n A labeled matrix of the same type and shape as markers\n\n See Also\n --------\n skimage.segmentation.random_walker : random walker segmentation\n A segmentation algorithm based on anisotropic diffusion, usually\n slower than the watershed but with good results on noisy data and\n boundaries with holes.\n\n Notes\n -----\n This function implements a watershed algorithm [1]_ [2]_ that apportions\n pixels into marked basins. The algorithm uses a priority queue to hold\n the pixels with the metric for the priority queue being pixel value, then\n the time of entry into the queue - this settles ties in favor of the\n closest marker.\n\n Some ideas taken from\n Soille, \"Automated Basin Delineation from Digital Elevation Models Using\n Mathematical Morphology\", Signal Processing 20 (1990) 171-182\n\n The most important insight in the paper is that entry time onto the queue\n solves two problems: a pixel should be assigned to the neighbor with the\n largest gradient or, if there is no gradient, pixels on a plateau should\n be split between markers on opposite sides.\n\n This implementation converts all arguments to specific, lowest common\n denominator types, then passes these to a C algorithm.\n\n Markers can be determined manually, or automatically using for example\n the local minima of the gradient of the image, or the local maxima of the\n distance function to the background for separating overlapping objects\n (see example).\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29\n\n .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html\n\n .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and\n Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation\n Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`\n https://www.tu-chemnitz.de/etit/proaut/publications/cws_pSLIC_ICPR.pdf\n\n Examples\n --------\n The watershed algorithm is useful to separate overlapping objects.\n\n We first generate an initial image with two overlapping circles:\n\n >>> x, y = np.indices((80, 80))\n >>> x1, y1, x2, y2 = 28, 28, 44, 52\n >>> r1, r2 = 16, 20\n >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2\n >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2\n >>> image = np.logical_or(mask_circle1, mask_circle2)\n\n Next, we want to separate the two circles. We generate markers at the\n maxima of the distance to the background:\n\n >>> from scipy import ndimage as ndi\n >>> distance = ndi.distance_transform_edt(image)\n >>> from skimage.feature import peak_local_max\n >>> local_maxi = peak_local_max(distance, labels=image,\n ... footprint=np.ones((3, 3)),\n ... 
indices=False)\n >>> markers = ndi.label(local_maxi)[0]\n\n Finally, we run the watershed on the image and markers:\n\n >>> labels = watershed(-distance, markers, mask=image)\n\n The algorithm works also for 3-D images, and can be used for example to\n separate overlapping spheres.\n \"\"\"\n image, markers, mask = _validate_inputs(image, markers, mask, connectivity)\n connectivity, offset = _validate_connectivity(image.ndim, connectivity,\n offset)\n\n # pad the image, markers, and mask so that we can use the mask to\n # keep from running off the edges\n pad_width = [(p, p) for p in offset]\n image = np.pad(image, pad_width, mode='constant')\n mask = np.pad(mask, pad_width, mode='constant').ravel()\n output = np.pad(markers, pad_width, mode='constant')\n\n flat_neighborhood = _offsets_to_raveled_neighbors(\n image.shape, connectivity, center=offset)\n marker_locations = np.flatnonzero(output)\n image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize\n\n _watershed_cy.watershed_raveled(image.ravel(),\n marker_locations, flat_neighborhood,\n mask, image_strides, compactness,\n output.ravel(),\n watershed_line)\n\n output = crop(output, pad_width, copy=True)\n\n return output\n", "path": "skimage/segmentation/_watershed.py"}], "after_files": [{"content": "\"\"\"watershed.py - watershed algorithm\n\nThis module implements a watershed algorithm that apportions pixels into\nmarked basins. The algorithm uses a priority queue to hold the pixels\nwith the metric for the priority queue being pixel value, then the time\nof entry into the queue - this settles ties in favor of the closest marker.\n\nSome ideas taken from\nSoille, \"Automated Basin Delineation from Digital Elevation Models Using\nMathematical Morphology\", Signal Processing 20 (1990) 171-182.\n\nThe most important insight in the paper is that entry time onto the queue\nsolves two problems: a pixel should be assigned to the neighbor with the\nlargest gradient or, if there is no gradient, pixels on a plateau should\nbe split between markers on opposite sides.\n\nOriginally part of CellProfiler, code licensed under both GPL and BSD licenses.\nWebsite: http://www.cellprofiler.org\n\nCopyright (c) 2003-2009 Massachusetts Institute of Technology\nCopyright (c) 2009-2011 Broad Institute\nAll rights reserved.\n\nOriginal author: Lee Kamentsky\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom . import _watershed_cy\nfrom ..morphology.extrema import local_minima\nfrom ..morphology._util import (_validate_connectivity,\n _offsets_to_raveled_neighbors)\nfrom ..util import crop, regular_seeds\n\n\ndef _validate_inputs(image, markers, mask, connectivity):\n \"\"\"Ensure that all inputs to watershed have matching shapes and types.\n\n Parameters\n ----------\n image : array\n The input image.\n markers : int or array of int\n The marker image.\n mask : array, or None\n A boolean mask, True where we want to compute the watershed.\n connectivity : int in {1, ..., image.ndim}\n The connectivity of the neighborhood of a pixel.\n\n Returns\n -------\n image, markers, mask : arrays\n The validated and formatted arrays. Image will have dtype float64,\n markers int32, and mask int8. 
If ``None`` was given for the mask,\n it is a volume of all 1s.\n\n Raises\n ------\n ValueError\n If the shapes of the given arrays don't match.\n \"\"\"\n n_pixels = image.size\n if mask is None:\n # Use a complete `True` mask if none is provided\n mask = np.ones(image.shape, bool)\n else:\n mask = np.asanyarray(mask, dtype=bool)\n n_pixels = np.sum(mask)\n if mask.shape != image.shape:\n message = (\"`mask` (shape {}) must have same shape as \"\n \"`image` (shape {})\".format(mask.shape, image.shape))\n raise ValueError(message)\n if markers is None:\n markers_bool = local_minima(image, connectivity=connectivity) * mask\n footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)\n markers = ndi.label(markers_bool, structure=footprint)[0]\n elif not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n # given int, assume that number of markers *within mask*.\n markers = regular_seeds(image.shape,\n int(markers / (n_pixels / image.size)))\n markers *= mask\n else:\n markers = np.asanyarray(markers) * mask\n if markers.shape != image.shape:\n message = (\"`markers` (shape {}) must have same shape as \"\n \"`image` (shape {})\".format(markers.shape, image.shape))\n raise ValueError(message)\n return (image.astype(np.float64),\n markers.astype(np.int32),\n mask.astype(np.int8))\n\n\ndef watershed(image, markers=None, connectivity=1, offset=None, mask=None,\n compactness=0, watershed_line=False):\n \"\"\"Find watershed basins in `image` flooded from given `markers`.\n\n Parameters\n ----------\n image : ndarray (2-D, 3-D, ...) of integers\n Data array where the lowest value points are labeled first.\n markers : int, or ndarray of int, same shape as `image`, optional\n The desired number of markers, or an array marking the basins with the\n values to be assigned in the label matrix. Zero means not a marker. If\n ``None`` (no markers given), the local minima of the image are used as\n markers.\n connectivity : ndarray, optional\n An array with the same number of dimensions as `image` whose\n non-zero elements indicate neighbors for connection.\n Following the scipy convention, default is a one-connected array of\n the dimension of the image.\n offset : array_like of shape image.ndim, optional\n offset of the connectivity (one offset per dimension)\n mask : ndarray of bools or 0s and 1s, optional\n Array of same shape as `image`. Only points at which mask == True\n will be labeled.\n compactness : float, optional\n Use compact watershed [3]_ with given compactness parameter.\n Higher values result in more regularly-shaped watershed basins.\n watershed_line : bool, optional\n If watershed_line is True, a one-pixel wide line separates the regions\n obtained by the watershed algorithm. The line has the label 0.\n\n Returns\n -------\n out : ndarray\n A labeled matrix of the same type and shape as markers\n\n See Also\n --------\n skimage.segmentation.random_walker : random walker segmentation\n A segmentation algorithm based on anisotropic diffusion, usually\n slower than the watershed but with good results on noisy data and\n boundaries with holes.\n\n Notes\n -----\n This function implements a watershed algorithm [1]_ [2]_ that apportions\n pixels into marked basins. 
The algorithm uses a priority queue to hold\n the pixels with the metric for the priority queue being pixel value, then\n the time of entry into the queue - this settles ties in favor of the\n closest marker.\n\n Some ideas taken from\n Soille, \"Automated Basin Delineation from Digital Elevation Models Using\n Mathematical Morphology\", Signal Processing 20 (1990) 171-182\n\n The most important insight in the paper is that entry time onto the queue\n solves two problems: a pixel should be assigned to the neighbor with the\n largest gradient or, if there is no gradient, pixels on a plateau should\n be split between markers on opposite sides.\n\n This implementation converts all arguments to specific, lowest common\n denominator types, then passes these to a C algorithm.\n\n Markers can be determined manually, or automatically using for example\n the local minima of the gradient of the image, or the local maxima of the\n distance function to the background for separating overlapping objects\n (see example).\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29\n\n .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html\n\n .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and\n Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation\n Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`\n https://www.tu-chemnitz.de/etit/proaut/publications/cws_pSLIC_ICPR.pdf\n\n Examples\n --------\n The watershed algorithm is useful to separate overlapping objects.\n\n We first generate an initial image with two overlapping circles:\n\n >>> x, y = np.indices((80, 80))\n >>> x1, y1, x2, y2 = 28, 28, 44, 52\n >>> r1, r2 = 16, 20\n >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2\n >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2\n >>> image = np.logical_or(mask_circle1, mask_circle2)\n\n Next, we want to separate the two circles. We generate markers at the\n maxima of the distance to the background:\n\n >>> from scipy import ndimage as ndi\n >>> distance = ndi.distance_transform_edt(image)\n >>> from skimage.feature import peak_local_max\n >>> local_maxi = peak_local_max(distance, labels=image,\n ... footprint=np.ones((3, 3)),\n ... indices=False)\n >>> markers = ndi.label(local_maxi)[0]\n\n Finally, we run the watershed on the image and markers:\n\n >>> labels = watershed(-distance, markers, mask=image)\n\n The algorithm works also for 3-D images, and can be used for example to\n separate overlapping spheres.\n \"\"\"\n image, markers, mask = _validate_inputs(image, markers, mask, connectivity)\n connectivity, offset = _validate_connectivity(image.ndim, connectivity,\n offset)\n\n # pad the image, markers, and mask so that we can use the mask to\n # keep from running off the edges\n pad_width = [(p, p) for p in offset]\n image = np.pad(image, pad_width, mode='constant')\n mask = np.pad(mask, pad_width, mode='constant').ravel()\n output = np.pad(markers, pad_width, mode='constant')\n\n flat_neighborhood = _offsets_to_raveled_neighbors(\n image.shape, connectivity, center=offset)\n marker_locations = np.flatnonzero(output)\n image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize\n\n _watershed_cy.watershed_raveled(image.ravel(),\n marker_locations, flat_neighborhood,\n mask, image_strides, compactness,\n output.ravel(),\n watershed_line)\n\n output = crop(output, pad_width, copy=True)\n\n return output\n", "path": "skimage/segmentation/_watershed.py"}]} | 3,344 | 178 |
gh_patches_debug_2800 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-1534 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add API to programmatically create new requests.
Original discussion: https://discourse.mitmproxy.org/t/are-there-any-script-examples-for-creating-a-get-post-put/134
It would be great if we had a dedicated API to make new requests from scripts, e.g. `master.new_request()` or something along those lines. This would probably just call `new_request` and then `replay_request`, but this seems useful enough to spell out explicitly.
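A rough sketch of what such a helper on `FlowMaster` could look like — the name and signature here are hypothetical, and it simply composes the existing `create_request` and `replay_request` shown in the file below:

```python
def new_request(self, method, scheme, host, port, path, block=False):
    # Build a minimal flow, add it to the flow list, then replay it
    # against the server to fetch a real response.
    f = self.create_request(method, scheme, host, port, path)
    self.replay_request(f, block=block)
    return f
```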
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/flow/master.py`
Content:
```
1 from __future__ import absolute_import, print_function, division
2
3 import os
4 import sys
5
6 from typing import Optional # noqa
7
8 import netlib.exceptions
9 from mitmproxy import controller
10 from mitmproxy import exceptions
11 from mitmproxy import models
12 from mitmproxy.flow import io
13 from mitmproxy.flow import modules
14 from mitmproxy.onboarding import app
15 from mitmproxy.protocol import http_replay
16
17
18 def event_sequence(f):
19 if isinstance(f, models.HTTPFlow):
20 if f.request:
21 yield "request", f
22 if f.response:
23 yield "responseheaders", f
24 yield "response", f
25 if f.error:
26 yield "error", f
27 elif isinstance(f, models.TCPFlow):
28 messages = f.messages
29 f.messages = []
30 f.reply = controller.DummyReply()
31 yield "tcp_open", f
32 while messages:
33 f.messages.append(messages.pop(0))
34 yield "tcp_message", f
35 if f.error:
36 yield "tcp_error", f
37 yield "tcp_close", f
38 else:
39 raise NotImplementedError
40
41
42 class FlowMaster(controller.Master):
43
44 @property
45 def server(self):
46 # At some point, we may want to have support for multiple servers.
47 # For now, this suffices.
48 if len(self.servers) > 0:
49 return self.servers[0]
50
51 def __init__(self, options, server, state):
52 super(FlowMaster, self).__init__(options)
53 if server:
54 self.add_server(server)
55 self.state = state
56 self.stream_large_bodies = None # type: Optional[modules.StreamLargeBodies]
57 self.apps = modules.AppRegistry()
58
59 def start_app(self, host, port):
60 self.apps.add(app.mapp, host, port)
61
62 def set_stream_large_bodies(self, max_size):
63 if max_size is not None:
64 self.stream_large_bodies = modules.StreamLargeBodies(max_size)
65 else:
66 self.stream_large_bodies = False
67
68 def duplicate_flow(self, f):
69 """
70 Duplicate flow, and insert it into state without triggering any of
71 the normal flow events.
72 """
73 f2 = f.copy()
74 self.state.add_flow(f2)
75 return f2
76
77 def create_request(self, method, scheme, host, port, path):
78 """
79 this method creates a new artificial and minimalist request also adds it to flowlist
80 """
81 c = models.ClientConnection.make_dummy(("", 0))
82 s = models.ServerConnection.make_dummy((host, port))
83
84 f = models.HTTPFlow(c, s)
85 headers = models.Headers()
86
87 req = models.HTTPRequest(
88 "absolute",
89 method,
90 scheme,
91 host,
92 port,
93 path,
94 b"HTTP/1.1",
95 headers,
96 b""
97 )
98 f.request = req
99 self.load_flow(f)
100 return f
101
102 def load_flow(self, f):
103 """
104 Loads a flow
105 """
106 if isinstance(f, models.HTTPFlow):
107 if self.server and self.options.mode == "reverse":
108 f.request.host = self.server.config.upstream_server.address.host
109 f.request.port = self.server.config.upstream_server.address.port
110 f.request.scheme = self.server.config.upstream_server.scheme
111 f.reply = controller.DummyReply()
112 for e, o in event_sequence(f):
113 getattr(self, e)(o)
114
115 def load_flows(self, fr):
116 """
117 Load flows from a FlowReader object.
118 """
119 cnt = 0
120 for i in fr.stream():
121 cnt += 1
122 self.load_flow(i)
123 return cnt
124
125 def load_flows_file(self, path):
126 path = os.path.expanduser(path)
127 try:
128 if path == "-":
129 # This is incompatible with Python 3 - maybe we can use click?
130 freader = io.FlowReader(sys.stdin)
131 return self.load_flows(freader)
132 else:
133 with open(path, "rb") as f:
134 freader = io.FlowReader(f)
135 return self.load_flows(freader)
136 except IOError as v:
137 raise exceptions.FlowReadException(v.strerror)
138
139 def replay_request(self, f, block=False):
140 """
141 Replay a HTTP request to receive a new response from the server.
142
143 Args:
144 f: The flow to replay.
145 block: If True, this function will wait for the replay to finish.
146 This causes a deadlock if activated in the main thread.
147
148 Returns:
149 The thread object doing the replay.
150
151 Raises:
152 exceptions.ReplayException, if the flow is in a state
153 where it is ineligible for replay.
154 """
155
156 if f.live:
157 raise exceptions.ReplayException(
158 "Can't replay live flow."
159 )
160 if f.intercepted:
161 raise exceptions.ReplayException(
162 "Can't replay intercepted flow."
163 )
164 if f.request.raw_content is None:
165 raise exceptions.ReplayException(
166 "Can't replay flow with missing content."
167 )
168 if not f.request:
169 raise exceptions.ReplayException(
170 "Can't replay flow with missing request."
171 )
172
173 f.backup()
174 f.request.is_replay = True
175
176 f.response = None
177 f.error = None
178
179 rt = http_replay.RequestReplayThread(
180 self.server.config,
181 f,
182 self.event_queue,
183 self.should_exit
184 )
185 rt.start() # pragma: no cover
186 if block:
187 rt.join()
188 return rt
189
190 @controller.handler
191 def log(self, l):
192 self.add_log(l.msg, l.level)
193
194 @controller.handler
195 def clientconnect(self, root_layer):
196 pass
197
198 @controller.handler
199 def clientdisconnect(self, root_layer):
200 pass
201
202 @controller.handler
203 def serverconnect(self, server_conn):
204 pass
205
206 @controller.handler
207 def serverdisconnect(self, server_conn):
208 pass
209
210 @controller.handler
211 def next_layer(self, top_layer):
212 pass
213
214 @controller.handler
215 def error(self, f):
216 self.state.update_flow(f)
217
218 @controller.handler
219 def request(self, f):
220 if f.live:
221 app = self.apps.get(f.request)
222 if app:
223 err = app.serve(
224 f,
225 f.client_conn.wfile,
226 **{"mitmproxy.master": self}
227 )
228 if err:
229 self.add_log("Error in wsgi app. %s" % err, "error")
230 f.reply.kill()
231 return
232 if f not in self.state.flows: # don't add again on replay
233 self.state.add_flow(f)
234
235 @controller.handler
236 def responseheaders(self, f):
237 try:
238 if self.stream_large_bodies:
239 self.stream_large_bodies.run(f, False)
240 except netlib.exceptions.HttpException:
241 f.reply.kill()
242 return
243
244 @controller.handler
245 def response(self, f):
246 self.state.update_flow(f)
247
248 @controller.handler
249 def websockets_handshake(self, f):
250 pass
251
252 def handle_intercept(self, f):
253 self.state.update_flow(f)
254
255 def handle_accept_intercept(self, f):
256 self.state.update_flow(f)
257
258 @controller.handler
259 def tcp_open(self, flow):
260 # TODO: This would break mitmproxy currently.
261 # self.state.add_flow(flow)
262 pass
263
264 @controller.handler
265 def tcp_message(self, flow):
266 pass
267
268 @controller.handler
269 def tcp_error(self, flow):
270 pass
271
272 @controller.handler
273 def tcp_close(self, flow):
274 pass
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/flow/master.py b/mitmproxy/flow/master.py
--- a/mitmproxy/flow/master.py
+++ b/mitmproxy/flow/master.py
@@ -187,6 +187,10 @@
rt.join()
return rt
+ def new_request(self, method, scheme, host, port, path, block=False):
+ f = self.create_request(method, scheme, host, port, path)
+ return self.replay_request(f, block)
+
@controller.handler
def log(self, l):
self.add_log(l.msg, l.level)
| {"golden_diff": "diff --git a/mitmproxy/flow/master.py b/mitmproxy/flow/master.py\n--- a/mitmproxy/flow/master.py\n+++ b/mitmproxy/flow/master.py\n@@ -187,6 +187,10 @@\n rt.join()\n return rt\n \n+ def new_request(self, method, scheme, host, port, path, block=False):\n+ f = self.create_request(method, scheme, host, port, path)\n+ return self.replay_request(f, block)\n+\n @controller.handler\n def log(self, l):\n self.add_log(l.msg, l.level)\n", "issue": "Add API to programatically create new requests.\nOriginal discussion: https://discourse.mitmproxy.org/t/are-there-any-script-examples-for-creating-a-get-post-put/134\n\nIt would be great if we'd have a dedicated API to make new requests from scripts, e.g. `master.new_request()` or something along those lines. This would probably just call `new_request` and then `replay_request`, but this seems useful enough to spell out explicitly.\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, division\n\nimport os\nimport sys\n\nfrom typing import Optional # noqa\n\nimport netlib.exceptions\nfrom mitmproxy import controller\nfrom mitmproxy import exceptions\nfrom mitmproxy import models\nfrom mitmproxy.flow import io\nfrom mitmproxy.flow import modules\nfrom mitmproxy.onboarding import app\nfrom mitmproxy.protocol import http_replay\n\n\ndef event_sequence(f):\n if isinstance(f, models.HTTPFlow):\n if f.request:\n yield \"request\", f\n if f.response:\n yield \"responseheaders\", f\n yield \"response\", f\n if f.error:\n yield \"error\", f\n elif isinstance(f, models.TCPFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"tcp_open\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"tcp_message\", f\n if f.error:\n yield \"tcp_error\", f\n yield \"tcp_close\", f\n else:\n raise NotImplementedError\n\n\nclass FlowMaster(controller.Master):\n\n @property\n def server(self):\n # At some point, we may want to have support for multiple servers.\n # For now, this suffices.\n if len(self.servers) > 0:\n return self.servers[0]\n\n def __init__(self, options, server, state):\n super(FlowMaster, self).__init__(options)\n if server:\n self.add_server(server)\n self.state = state\n self.stream_large_bodies = None # type: Optional[modules.StreamLargeBodies]\n self.apps = modules.AppRegistry()\n\n def start_app(self, host, port):\n self.apps.add(app.mapp, host, port)\n\n def set_stream_large_bodies(self, max_size):\n if max_size is not None:\n self.stream_large_bodies = modules.StreamLargeBodies(max_size)\n else:\n self.stream_large_bodies = False\n\n def duplicate_flow(self, f):\n \"\"\"\n Duplicate flow, and insert it into state without triggering any of\n the normal flow events.\n \"\"\"\n f2 = f.copy()\n self.state.add_flow(f2)\n return f2\n\n def create_request(self, method, scheme, host, port, path):\n \"\"\"\n this method creates a new artificial and minimalist request also adds it to flowlist\n \"\"\"\n c = models.ClientConnection.make_dummy((\"\", 0))\n s = models.ServerConnection.make_dummy((host, port))\n\n f = models.HTTPFlow(c, s)\n headers = models.Headers()\n\n req = models.HTTPRequest(\n \"absolute\",\n method,\n scheme,\n host,\n port,\n path,\n b\"HTTP/1.1\",\n headers,\n b\"\"\n )\n f.request = req\n self.load_flow(f)\n return f\n\n def load_flow(self, f):\n \"\"\"\n Loads a flow\n \"\"\"\n if isinstance(f, models.HTTPFlow):\n if self.server and self.options.mode == \"reverse\":\n f.request.host = 
self.server.config.upstream_server.address.host\n f.request.port = self.server.config.upstream_server.address.port\n f.request.scheme = self.server.config.upstream_server.scheme\n f.reply = controller.DummyReply()\n for e, o in event_sequence(f):\n getattr(self, e)(o)\n\n def load_flows(self, fr):\n \"\"\"\n Load flows from a FlowReader object.\n \"\"\"\n cnt = 0\n for i in fr.stream():\n cnt += 1\n self.load_flow(i)\n return cnt\n\n def load_flows_file(self, path):\n path = os.path.expanduser(path)\n try:\n if path == \"-\":\n # This is incompatible with Python 3 - maybe we can use click?\n freader = io.FlowReader(sys.stdin)\n return self.load_flows(freader)\n else:\n with open(path, \"rb\") as f:\n freader = io.FlowReader(f)\n return self.load_flows(freader)\n except IOError as v:\n raise exceptions.FlowReadException(v.strerror)\n\n def replay_request(self, f, block=False):\n \"\"\"\n Replay a HTTP request to receive a new response from the server.\n\n Args:\n f: The flow to replay.\n block: If True, this function will wait for the replay to finish.\n This causes a deadlock if activated in the main thread.\n\n Returns:\n The thread object doing the replay.\n\n Raises:\n exceptions.ReplayException, if the flow is in a state\n where it is ineligible for replay.\n \"\"\"\n\n if f.live:\n raise exceptions.ReplayException(\n \"Can't replay live flow.\"\n )\n if f.intercepted:\n raise exceptions.ReplayException(\n \"Can't replay intercepted flow.\"\n )\n if f.request.raw_content is None:\n raise exceptions.ReplayException(\n \"Can't replay flow with missing content.\"\n )\n if not f.request:\n raise exceptions.ReplayException(\n \"Can't replay flow with missing request.\"\n )\n\n f.backup()\n f.request.is_replay = True\n\n f.response = None\n f.error = None\n\n rt = http_replay.RequestReplayThread(\n self.server.config,\n f,\n self.event_queue,\n self.should_exit\n )\n rt.start() # pragma: no cover\n if block:\n rt.join()\n return rt\n\n @controller.handler\n def log(self, l):\n self.add_log(l.msg, l.level)\n\n @controller.handler\n def clientconnect(self, root_layer):\n pass\n\n @controller.handler\n def clientdisconnect(self, root_layer):\n pass\n\n @controller.handler\n def serverconnect(self, server_conn):\n pass\n\n @controller.handler\n def serverdisconnect(self, server_conn):\n pass\n\n @controller.handler\n def next_layer(self, top_layer):\n pass\n\n @controller.handler\n def error(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def request(self, f):\n if f.live:\n app = self.apps.get(f.request)\n if app:\n err = app.serve(\n f,\n f.client_conn.wfile,\n **{\"mitmproxy.master\": self}\n )\n if err:\n self.add_log(\"Error in wsgi app. 
%s\" % err, \"error\")\n f.reply.kill()\n return\n if f not in self.state.flows: # don't add again on replay\n self.state.add_flow(f)\n\n @controller.handler\n def responseheaders(self, f):\n try:\n if self.stream_large_bodies:\n self.stream_large_bodies.run(f, False)\n except netlib.exceptions.HttpException:\n f.reply.kill()\n return\n\n @controller.handler\n def response(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def websockets_handshake(self, f):\n pass\n\n def handle_intercept(self, f):\n self.state.update_flow(f)\n\n def handle_accept_intercept(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def tcp_open(self, flow):\n # TODO: This would break mitmproxy currently.\n # self.state.add_flow(flow)\n pass\n\n @controller.handler\n def tcp_message(self, flow):\n pass\n\n @controller.handler\n def tcp_error(self, flow):\n pass\n\n @controller.handler\n def tcp_close(self, flow):\n pass\n", "path": "mitmproxy/flow/master.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function, division\n\nimport os\nimport sys\n\nfrom typing import Optional # noqa\n\nimport netlib.exceptions\nfrom mitmproxy import controller\nfrom mitmproxy import exceptions\nfrom mitmproxy import models\nfrom mitmproxy.flow import io\nfrom mitmproxy.flow import modules\nfrom mitmproxy.onboarding import app\nfrom mitmproxy.protocol import http_replay\n\n\ndef event_sequence(f):\n if isinstance(f, models.HTTPFlow):\n if f.request:\n yield \"request\", f\n if f.response:\n yield \"responseheaders\", f\n yield \"response\", f\n if f.error:\n yield \"error\", f\n elif isinstance(f, models.TCPFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"tcp_open\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"tcp_message\", f\n if f.error:\n yield \"tcp_error\", f\n yield \"tcp_close\", f\n else:\n raise NotImplementedError\n\n\nclass FlowMaster(controller.Master):\n\n @property\n def server(self):\n # At some point, we may want to have support for multiple servers.\n # For now, this suffices.\n if len(self.servers) > 0:\n return self.servers[0]\n\n def __init__(self, options, server, state):\n super(FlowMaster, self).__init__(options)\n if server:\n self.add_server(server)\n self.state = state\n self.stream_large_bodies = None # type: Optional[modules.StreamLargeBodies]\n self.apps = modules.AppRegistry()\n\n def start_app(self, host, port):\n self.apps.add(app.mapp, host, port)\n\n def set_stream_large_bodies(self, max_size):\n if max_size is not None:\n self.stream_large_bodies = modules.StreamLargeBodies(max_size)\n else:\n self.stream_large_bodies = False\n\n def duplicate_flow(self, f):\n \"\"\"\n Duplicate flow, and insert it into state without triggering any of\n the normal flow events.\n \"\"\"\n f2 = f.copy()\n self.state.add_flow(f2)\n return f2\n\n def create_request(self, method, scheme, host, port, path):\n \"\"\"\n this method creates a new artificial and minimalist request also adds it to flowlist\n \"\"\"\n c = models.ClientConnection.make_dummy((\"\", 0))\n s = models.ServerConnection.make_dummy((host, port))\n\n f = models.HTTPFlow(c, s)\n headers = models.Headers()\n\n req = models.HTTPRequest(\n \"absolute\",\n method,\n scheme,\n host,\n port,\n path,\n b\"HTTP/1.1\",\n headers,\n b\"\"\n )\n f.request = req\n self.load_flow(f)\n return f\n\n def load_flow(self, f):\n \"\"\"\n Loads a flow\n \"\"\"\n if isinstance(f, models.HTTPFlow):\n if self.server and self.options.mode == 
\"reverse\":\n f.request.host = self.server.config.upstream_server.address.host\n f.request.port = self.server.config.upstream_server.address.port\n f.request.scheme = self.server.config.upstream_server.scheme\n f.reply = controller.DummyReply()\n for e, o in event_sequence(f):\n getattr(self, e)(o)\n\n def load_flows(self, fr):\n \"\"\"\n Load flows from a FlowReader object.\n \"\"\"\n cnt = 0\n for i in fr.stream():\n cnt += 1\n self.load_flow(i)\n return cnt\n\n def load_flows_file(self, path):\n path = os.path.expanduser(path)\n try:\n if path == \"-\":\n # This is incompatible with Python 3 - maybe we can use click?\n freader = io.FlowReader(sys.stdin)\n return self.load_flows(freader)\n else:\n with open(path, \"rb\") as f:\n freader = io.FlowReader(f)\n return self.load_flows(freader)\n except IOError as v:\n raise exceptions.FlowReadException(v.strerror)\n\n def replay_request(self, f, block=False):\n \"\"\"\n Replay a HTTP request to receive a new response from the server.\n\n Args:\n f: The flow to replay.\n block: If True, this function will wait for the replay to finish.\n This causes a deadlock if activated in the main thread.\n\n Returns:\n The thread object doing the replay.\n\n Raises:\n exceptions.ReplayException, if the flow is in a state\n where it is ineligible for replay.\n \"\"\"\n\n if f.live:\n raise exceptions.ReplayException(\n \"Can't replay live flow.\"\n )\n if f.intercepted:\n raise exceptions.ReplayException(\n \"Can't replay intercepted flow.\"\n )\n if f.request.raw_content is None:\n raise exceptions.ReplayException(\n \"Can't replay flow with missing content.\"\n )\n if not f.request:\n raise exceptions.ReplayException(\n \"Can't replay flow with missing request.\"\n )\n\n f.backup()\n f.request.is_replay = True\n\n f.response = None\n f.error = None\n\n rt = http_replay.RequestReplayThread(\n self.server.config,\n f,\n self.event_queue,\n self.should_exit\n )\n rt.start() # pragma: no cover\n if block:\n rt.join()\n return rt\n\n def new_request(self, method, scheme, host, port, path, block=False):\n f = self.create_request(method, scheme, host, port, path)\n return self.replay_request(f, block)\n\n @controller.handler\n def log(self, l):\n self.add_log(l.msg, l.level)\n\n @controller.handler\n def clientconnect(self, root_layer):\n pass\n\n @controller.handler\n def clientdisconnect(self, root_layer):\n pass\n\n @controller.handler\n def serverconnect(self, server_conn):\n pass\n\n @controller.handler\n def serverdisconnect(self, server_conn):\n pass\n\n @controller.handler\n def next_layer(self, top_layer):\n pass\n\n @controller.handler\n def error(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def request(self, f):\n if f.live:\n app = self.apps.get(f.request)\n if app:\n err = app.serve(\n f,\n f.client_conn.wfile,\n **{\"mitmproxy.master\": self}\n )\n if err:\n self.add_log(\"Error in wsgi app. 
%s\" % err, \"error\")\n f.reply.kill()\n return\n if f not in self.state.flows: # don't add again on replay\n self.state.add_flow(f)\n\n @controller.handler\n def responseheaders(self, f):\n try:\n if self.stream_large_bodies:\n self.stream_large_bodies.run(f, False)\n except netlib.exceptions.HttpException:\n f.reply.kill()\n return\n\n @controller.handler\n def response(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def websockets_handshake(self, f):\n pass\n\n def handle_intercept(self, f):\n self.state.update_flow(f)\n\n def handle_accept_intercept(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def tcp_open(self, flow):\n # TODO: This would break mitmproxy currently.\n # self.state.add_flow(flow)\n pass\n\n @controller.handler\n def tcp_message(self, flow):\n pass\n\n @controller.handler\n def tcp_error(self, flow):\n pass\n\n @controller.handler\n def tcp_close(self, flow):\n pass\n", "path": "mitmproxy/flow/master.py"}]} | 2,737 | 134 |
gh_patches_debug_22743 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-3109 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
With pydantic v2 a field marked as Optional[conint] throws an exception
## Description
I experienced this issue with `Optional[conint]`, but I suspect this will happen with any builtin pydantic field whose type ends up being an `Annotated` type. I was able to reproduce this in the existing tests within `tests/experimental/pydantic/test_fields.py`, test method `test_conint`. Just replace `field: pydantic.conint(lt=100)` with `field: Optional[pydantic.conint(lt=100)]`. The exception is copied below in the additional context.
At least in my case I can work around this by just creating a validator for my field instead of using `conint`. I only have one field that uses it. However, it's definitely not ideal.
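For what it's worth, a hedged sketch of that workaround — this assumes pydantic v2's `field_validator`, and the field name and bound are just examples:

```python
from typing import Optional

import pydantic


class Model(pydantic.BaseModel):
    # A plain Optional[int] avoids the Annotated type that conint(lt=100) produces.
    field: Optional[int] = None

    @pydantic.field_validator("field")
    @classmethod
    def _enforce_upper_bound(cls, value: Optional[int]) -> Optional[int]:
        if value is not None and value >= 100:
            raise ValueError("field must be less than 100")
        return value
```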
Example code below to see the error:
```python
class Model(pydantic.BaseModel):
field: Optional[pydantic.conint(lt=100)]
@strawberry.experimental.pydantic.type(model=Model)
class Type:
field: strawberry.auto
```
## System Information
- Operating system: Mac OS
- Strawberry version: Need to use a new enough version for pydantic v2 support.
## Additional Context
I'm guessing we need to add some additional logic inside `replace_types_recursively` that handles the `Annotated` type more gracefully? One thing I found interesting while debugging is that the `Annotated` type only comes in with `Optional`; if I just use `conint` directly, the type comes in as just an `int`. I don't have a lot of experience with Python typing code, so I was afraid to try to put in a fix.
```
tests/experimental/pydantic/test_fields.py:92 (test_conint)
@needs_pydantic_v2
def test_conint():
class Model(pydantic.BaseModel):
field: Optional[pydantic.conint(lt=100)]
@strawberry.experimental.pydantic.type(Model)
> class Type:
test_fields.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../../../strawberry/experimental/pydantic/object_type.py:183: in wrap
all_model_fields: List[DataclassCreationFields] = [
../../../strawberry/experimental/pydantic/object_type.py:184: in <listcomp>
_build_dataclass_creation_fields(
../../../strawberry/experimental/pydantic/object_type.py:67: in _build_dataclass_creation_fields
get_type_for_field(field, is_input)
../../../strawberry/experimental/pydantic/object_type.py:49: in get_type_for_field
replaced_type = replace_types_recursively(outer_type, is_input)
../../../strawberry/experimental/pydantic/fields.py:172: in replace_types_recursively
converted = tuple(
../../../strawberry/experimental/pydantic/fields.py:173: in <genexpr>
replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)
../../../strawberry/experimental/pydantic/fields.py:180: in replace_types_recursively
replaced_type = replaced_type.copy_with(converted)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = typing.Annotated[int, None, Interval(gt=None, ge=None, lt=100, le=None), None]
params = (<class 'int'>, None, Interval(gt=None, ge=None, lt=100, le=None), None)
def copy_with(self, params):
> assert len(params) == 1
E AssertionError
```
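A minimal sketch of the kind of guard that might address this, assuming the change belongs in `replace_types_recursively` right before `copy_with` is called (and that `Annotated` is imported from `typing_extensions`):

```python
# Sketch only: Annotated's copy_with() expects exactly one parameter, so keep
# just the converted underlying type and drop the accompanying metadata.
if origin is Annotated and converted:
    converted = (converted[0],)
replaced_type = replaced_type.copy_with(converted)
```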
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/experimental/pydantic/fields.py`
Content:
```
1 import builtins
2 from decimal import Decimal
3 from typing import Any, Dict, List, Optional, Type, Union
4 from uuid import UUID
5
6 import pydantic
7 from pydantic import BaseModel
8
9 from strawberry.experimental.pydantic._compat import (
10 IS_PYDANTIC_V1,
11 get_args,
12 get_origin,
13 is_new_type,
14 lenient_issubclass,
15 new_type_supertype,
16 )
17 from strawberry.experimental.pydantic.exceptions import (
18 UnregisteredTypeException,
19 UnsupportedTypeError,
20 )
21 from strawberry.types.types import StrawberryObjectDefinition
22
23 try:
24 from types import UnionType as TypingUnionType
25 except ImportError:
26 import sys
27
28 if sys.version_info < (3, 10):
29 TypingUnionType = ()
30 else:
31 raise
32
33 try:
34 from typing import GenericAlias as TypingGenericAlias # type: ignore
35 except ImportError:
36 import sys
37
38 # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)
39 # we do this under a conditional to avoid a mypy :)
40 if sys.version_info < (3, 9):
41 TypingGenericAlias = ()
42 else:
43 raise
44
45 ATTR_TO_TYPE_MAP = {
46 "NoneStr": Optional[str],
47 "NoneBytes": Optional[bytes],
48 "StrBytes": None,
49 "NoneStrBytes": None,
50 "StrictStr": str,
51 "ConstrainedBytes": bytes,
52 "conbytes": bytes,
53 "ConstrainedStr": str,
54 "constr": str,
55 "EmailStr": str,
56 "PyObject": None,
57 "ConstrainedInt": int,
58 "conint": int,
59 "PositiveInt": int,
60 "NegativeInt": int,
61 "ConstrainedFloat": float,
62 "confloat": float,
63 "PositiveFloat": float,
64 "NegativeFloat": float,
65 "ConstrainedDecimal": Decimal,
66 "condecimal": Decimal,
67 "UUID1": UUID,
68 "UUID3": UUID,
69 "UUID4": UUID,
70 "UUID5": UUID,
71 "FilePath": None,
72 "DirectoryPath": None,
73 "Json": None,
74 "JsonWrapper": None,
75 "SecretStr": str,
76 "SecretBytes": bytes,
77 "StrictBool": bool,
78 "StrictInt": int,
79 "StrictFloat": float,
80 "PaymentCardNumber": None,
81 "ByteSize": None,
82 "AnyUrl": str,
83 "AnyHttpUrl": str,
84 "HttpUrl": str,
85 "PostgresDsn": str,
86 "RedisDsn": str,
87 }
88
89 ATTR_TO_TYPE_MAP_Pydantic_V2 = {
90 "EmailStr": str,
91 "SecretStr": str,
92 "SecretBytes": bytes,
93 "AnyUrl": str,
94 }
95
96 ATTR_TO_TYPE_MAP_Pydantic_Core_V2 = {
97 "MultiHostUrl": str,
98 }
99
100
101 def get_fields_map_for_v2() -> Dict[Any, Any]:
102 import pydantic_core
103
104 fields_map = {
105 getattr(pydantic, field_name): type
106 for field_name, type in ATTR_TO_TYPE_MAP_Pydantic_V2.items()
107 if hasattr(pydantic, field_name)
108 }
109 fields_map.update(
110 {
111 getattr(pydantic_core, field_name): type
112 for field_name, type in ATTR_TO_TYPE_MAP_Pydantic_Core_V2.items()
113 if hasattr(pydantic_core, field_name)
114 }
115 )
116
117 return fields_map
118
119
120 FIELDS_MAP = (
121 {
122 getattr(pydantic, field_name): type
123 for field_name, type in ATTR_TO_TYPE_MAP.items()
124 if hasattr(pydantic, field_name)
125 }
126 if IS_PYDANTIC_V1
127 else get_fields_map_for_v2()
128 )
129
130
131 def get_basic_type(type_: Any) -> Type[Any]:
132 if IS_PYDANTIC_V1:
133 # only pydantic v1 has these
134 if lenient_issubclass(type_, pydantic.ConstrainedInt):
135 return int
136 if lenient_issubclass(type_, pydantic.ConstrainedFloat):
137 return float
138 if lenient_issubclass(type_, pydantic.ConstrainedStr):
139 return str
140 if lenient_issubclass(type_, pydantic.ConstrainedList):
141 return List[get_basic_type(type_.item_type)] # type: ignore
142
143 if type_ in FIELDS_MAP:
144 type_ = FIELDS_MAP.get(type_)
145 if type_ is None:
146 raise UnsupportedTypeError()
147
148 if is_new_type(type_):
149 return new_type_supertype(type_)
150
151 return type_
152
153
154 def replace_pydantic_types(type_: Any, is_input: bool) -> Any:
155 if lenient_issubclass(type_, BaseModel):
156 attr = "_strawberry_input_type" if is_input else "_strawberry_type"
157 if hasattr(type_, attr):
158 return getattr(type_, attr)
159 else:
160 raise UnregisteredTypeException(type_)
161 return type_
162
163
164 def replace_types_recursively(type_: Any, is_input: bool) -> Any:
165 """Runs the conversions recursively into the arguments of generic types if any"""
166 basic_type = get_basic_type(type_)
167 replaced_type = replace_pydantic_types(basic_type, is_input)
168
169 origin = get_origin(type_)
170 if not origin or not hasattr(type_, "__args__"):
171 return replaced_type
172
173 converted = tuple(
174 replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)
175 )
176
177 if isinstance(replaced_type, TypingGenericAlias):
178 return TypingGenericAlias(origin, converted)
179 if isinstance(replaced_type, TypingUnionType):
180 return Union[converted]
181 replaced_type = replaced_type.copy_with(converted)
182
183 if isinstance(replaced_type, StrawberryObjectDefinition):
184 # TODO: Not sure if this is necessary. No coverage in tests
185 # TODO: Unnecessary with StrawberryObject
186 replaced_type = builtins.type(
187 replaced_type.name,
188 (),
189 {"__strawberry_definition__": replaced_type},
190 )
191
192 return replaced_type
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/experimental/pydantic/fields.py b/strawberry/experimental/pydantic/fields.py
--- a/strawberry/experimental/pydantic/fields.py
+++ b/strawberry/experimental/pydantic/fields.py
@@ -1,6 +1,7 @@
import builtins
from decimal import Decimal
from typing import Any, Dict, List, Optional, Type, Union
+from typing_extensions import Annotated
from uuid import UUID
import pydantic
@@ -42,6 +43,7 @@
else:
raise
+
ATTR_TO_TYPE_MAP = {
"NoneStr": Optional[str],
"NoneBytes": Optional[bytes],
@@ -178,6 +180,11 @@
return TypingGenericAlias(origin, converted)
if isinstance(replaced_type, TypingUnionType):
return Union[converted]
+
+ # TODO: investigate if we could move the check for annotated to the top
+ if origin is Annotated and converted:
+ converted = (converted[0],)
+
replaced_type = replaced_type.copy_with(converted)
if isinstance(replaced_type, StrawberryObjectDefinition):
| {"golden_diff": "diff --git a/strawberry/experimental/pydantic/fields.py b/strawberry/experimental/pydantic/fields.py\n--- a/strawberry/experimental/pydantic/fields.py\n+++ b/strawberry/experimental/pydantic/fields.py\n@@ -1,6 +1,7 @@\n import builtins\n from decimal import Decimal\n from typing import Any, Dict, List, Optional, Type, Union\n+from typing_extensions import Annotated\n from uuid import UUID\n \n import pydantic\n@@ -42,6 +43,7 @@\n else:\n raise\n \n+\n ATTR_TO_TYPE_MAP = {\n \"NoneStr\": Optional[str],\n \"NoneBytes\": Optional[bytes],\n@@ -178,6 +180,11 @@\n return TypingGenericAlias(origin, converted)\n if isinstance(replaced_type, TypingUnionType):\n return Union[converted]\n+\n+ # TODO: investigate if we could move the check for annotated to the top\n+ if origin is Annotated and converted:\n+ converted = (converted[0],)\n+\n replaced_type = replaced_type.copy_with(converted)\n \n if isinstance(replaced_type, StrawberryObjectDefinition):\n", "issue": "With pydantic v2 a field marked as Optional[conint] throws an exception\n## Description\r\n\r\nI experienced this issue with `Optional[conint]`, but I suspect this will happen with any builtin pydantic field whose type ends up being an `Annotated` type. I was able to reproduce this in the existing tests within `tests/experimental/pydantic/test_fields.py`, test method `test_conint`. Just replace `field: pydantic.conint(lt=100)` with `field: Optional[pydantic.conint(lt=100)]`. The exception is copied below in the additional context.\r\n\r\nAt least in my case I can work around this by just creating a validator for my field instead of using `conint`. I only have one field that uses it. However, it's definitely not ideal.\r\n\r\nExample code below to see the error:\r\n\r\n```python\r\nclass Model(pydantic.BaseModel):\r\n field: Optional[pydantic.conint(lt=100)]\r\n\r\[email protected](model=Model)\r\nclass Type:\r\n field: strawberry.auto\r\n```\r\n\r\n## System Information\r\n\r\n - Operating system: Mac OS\r\n - Strawberry version: Need to use a new enough version for pydantic v2 support.\r\n\r\n## Additional Context\r\n\r\nI'm guessing we need to add some additional logic inside `replace_types_recursively` that handles the `Annotated` type more gracefully? One thing that was interesting was when I was debugging, the `Annotated` type only comes in with `Optional`. If I just directly use `conint`, the type comes in as just an `int`. I don't have a lot of experience with python type code so I was afraid to try and put in a fix.\r\n\r\n<!-- Add any other relevant information about the problem here. 
-->\r\n```\r\ntests/experimental/pydantic/test_fields.py:92 (test_conint)\r\n@needs_pydantic_v2\r\n def test_conint():\r\n class Model(pydantic.BaseModel):\r\n field: Optional[pydantic.conint(lt=100)]\r\n \r\n @strawberry.experimental.pydantic.type(Model)\r\n> class Type:\r\n\r\ntest_fields.py:99: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n../../../strawberry/experimental/pydantic/object_type.py:183: in wrap\r\n all_model_fields: List[DataclassCreationFields] = [\r\n../../../strawberry/experimental/pydantic/object_type.py:184: in <listcomp>\r\n _build_dataclass_creation_fields(\r\n../../../strawberry/experimental/pydantic/object_type.py:67: in _build_dataclass_creation_fields\r\n get_type_for_field(field, is_input)\r\n../../../strawberry/experimental/pydantic/object_type.py:49: in get_type_for_field\r\n replaced_type = replace_types_recursively(outer_type, is_input)\r\n../../../strawberry/experimental/pydantic/fields.py:172: in replace_types_recursively\r\n converted = tuple(\r\n../../../strawberry/experimental/pydantic/fields.py:173: in <genexpr>\r\n replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)\r\n../../../strawberry/experimental/pydantic/fields.py:180: in replace_types_recursively\r\n replaced_type = replaced_type.copy_with(converted)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nself = typing.Annotated[int, None, Interval(gt=None, ge=None, lt=100, le=None), None]\r\nparams = (<class 'int'>, None, Interval(gt=None, ge=None, lt=100, le=None), None)\r\n\r\n def copy_with(self, params):\r\n> assert len(params) == 1\r\nE AssertionError\r\n```\n\n<!-- POLAR PLEDGE BADGE START -->\n## Upvote & Fund\n\n- We're using [Polar.sh](https://polar.sh/strawberry-graphql) so you can upvote and help fund this issue.\n- We receive the funding once the issue is completed & confirmed by you.\n- Thank you in advance for helping prioritize & fund our backlog.\n\n<a href=\"https://polar.sh/strawberry-graphql/strawberry/issues/3107\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/strawberry-graphql/strawberry/issues/3107/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/strawberry-graphql/strawberry/issues/3107/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "import builtins\nfrom decimal import Decimal\nfrom typing import Any, Dict, List, Optional, Type, Union\nfrom uuid import UUID\n\nimport pydantic\nfrom pydantic import BaseModel\n\nfrom strawberry.experimental.pydantic._compat import (\n IS_PYDANTIC_V1,\n get_args,\n get_origin,\n is_new_type,\n lenient_issubclass,\n new_type_supertype,\n)\nfrom strawberry.experimental.pydantic.exceptions import (\n UnregisteredTypeException,\n UnsupportedTypeError,\n)\nfrom strawberry.types.types import StrawberryObjectDefinition\n\ntry:\n from types import UnionType as TypingUnionType\nexcept ImportError:\n import sys\n\n if sys.version_info < (3, 10):\n TypingUnionType = ()\n else:\n raise\n\ntry:\n from typing import GenericAlias as TypingGenericAlias # type: ignore\nexcept ImportError:\n import sys\n\n # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] 
and so on)\n # we do this under a conditional to avoid a mypy :)\n if sys.version_info < (3, 9):\n TypingGenericAlias = ()\n else:\n raise\n\nATTR_TO_TYPE_MAP = {\n \"NoneStr\": Optional[str],\n \"NoneBytes\": Optional[bytes],\n \"StrBytes\": None,\n \"NoneStrBytes\": None,\n \"StrictStr\": str,\n \"ConstrainedBytes\": bytes,\n \"conbytes\": bytes,\n \"ConstrainedStr\": str,\n \"constr\": str,\n \"EmailStr\": str,\n \"PyObject\": None,\n \"ConstrainedInt\": int,\n \"conint\": int,\n \"PositiveInt\": int,\n \"NegativeInt\": int,\n \"ConstrainedFloat\": float,\n \"confloat\": float,\n \"PositiveFloat\": float,\n \"NegativeFloat\": float,\n \"ConstrainedDecimal\": Decimal,\n \"condecimal\": Decimal,\n \"UUID1\": UUID,\n \"UUID3\": UUID,\n \"UUID4\": UUID,\n \"UUID5\": UUID,\n \"FilePath\": None,\n \"DirectoryPath\": None,\n \"Json\": None,\n \"JsonWrapper\": None,\n \"SecretStr\": str,\n \"SecretBytes\": bytes,\n \"StrictBool\": bool,\n \"StrictInt\": int,\n \"StrictFloat\": float,\n \"PaymentCardNumber\": None,\n \"ByteSize\": None,\n \"AnyUrl\": str,\n \"AnyHttpUrl\": str,\n \"HttpUrl\": str,\n \"PostgresDsn\": str,\n \"RedisDsn\": str,\n}\n\nATTR_TO_TYPE_MAP_Pydantic_V2 = {\n \"EmailStr\": str,\n \"SecretStr\": str,\n \"SecretBytes\": bytes,\n \"AnyUrl\": str,\n}\n\nATTR_TO_TYPE_MAP_Pydantic_Core_V2 = {\n \"MultiHostUrl\": str,\n}\n\n\ndef get_fields_map_for_v2() -> Dict[Any, Any]:\n import pydantic_core\n\n fields_map = {\n getattr(pydantic, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP_Pydantic_V2.items()\n if hasattr(pydantic, field_name)\n }\n fields_map.update(\n {\n getattr(pydantic_core, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP_Pydantic_Core_V2.items()\n if hasattr(pydantic_core, field_name)\n }\n )\n\n return fields_map\n\n\nFIELDS_MAP = (\n {\n getattr(pydantic, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP.items()\n if hasattr(pydantic, field_name)\n }\n if IS_PYDANTIC_V1\n else get_fields_map_for_v2()\n)\n\n\ndef get_basic_type(type_: Any) -> Type[Any]:\n if IS_PYDANTIC_V1:\n # only pydantic v1 has these\n if lenient_issubclass(type_, pydantic.ConstrainedInt):\n return int\n if lenient_issubclass(type_, pydantic.ConstrainedFloat):\n return float\n if lenient_issubclass(type_, pydantic.ConstrainedStr):\n return str\n if lenient_issubclass(type_, pydantic.ConstrainedList):\n return List[get_basic_type(type_.item_type)] # type: ignore\n\n if type_ in FIELDS_MAP:\n type_ = FIELDS_MAP.get(type_)\n if type_ is None:\n raise UnsupportedTypeError()\n\n if is_new_type(type_):\n return new_type_supertype(type_)\n\n return type_\n\n\ndef replace_pydantic_types(type_: Any, is_input: bool) -> Any:\n if lenient_issubclass(type_, BaseModel):\n attr = \"_strawberry_input_type\" if is_input else \"_strawberry_type\"\n if hasattr(type_, attr):\n return getattr(type_, attr)\n else:\n raise UnregisteredTypeException(type_)\n return type_\n\n\ndef replace_types_recursively(type_: Any, is_input: bool) -> Any:\n \"\"\"Runs the conversions recursively into the arguments of generic types if any\"\"\"\n basic_type = get_basic_type(type_)\n replaced_type = replace_pydantic_types(basic_type, is_input)\n\n origin = get_origin(type_)\n if not origin or not hasattr(type_, \"__args__\"):\n return replaced_type\n\n converted = tuple(\n replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)\n )\n\n if isinstance(replaced_type, TypingGenericAlias):\n return TypingGenericAlias(origin, converted)\n if isinstance(replaced_type, 
TypingUnionType):\n return Union[converted]\n replaced_type = replaced_type.copy_with(converted)\n\n if isinstance(replaced_type, StrawberryObjectDefinition):\n # TODO: Not sure if this is necessary. No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n replaced_type = builtins.type(\n replaced_type.name,\n (),\n {\"__strawberry_definition__\": replaced_type},\n )\n\n return replaced_type\n", "path": "strawberry/experimental/pydantic/fields.py"}], "after_files": [{"content": "import builtins\nfrom decimal import Decimal\nfrom typing import Any, Dict, List, Optional, Type, Union\nfrom typing_extensions import Annotated\nfrom uuid import UUID\n\nimport pydantic\nfrom pydantic import BaseModel\n\nfrom strawberry.experimental.pydantic._compat import (\n IS_PYDANTIC_V1,\n get_args,\n get_origin,\n is_new_type,\n lenient_issubclass,\n new_type_supertype,\n)\nfrom strawberry.experimental.pydantic.exceptions import (\n UnregisteredTypeException,\n UnsupportedTypeError,\n)\nfrom strawberry.types.types import StrawberryObjectDefinition\n\ntry:\n from types import UnionType as TypingUnionType\nexcept ImportError:\n import sys\n\n if sys.version_info < (3, 10):\n TypingUnionType = ()\n else:\n raise\n\ntry:\n from typing import GenericAlias as TypingGenericAlias # type: ignore\nexcept ImportError:\n import sys\n\n # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)\n # we do this under a conditional to avoid a mypy :)\n if sys.version_info < (3, 9):\n TypingGenericAlias = ()\n else:\n raise\n\n\nATTR_TO_TYPE_MAP = {\n \"NoneStr\": Optional[str],\n \"NoneBytes\": Optional[bytes],\n \"StrBytes\": None,\n \"NoneStrBytes\": None,\n \"StrictStr\": str,\n \"ConstrainedBytes\": bytes,\n \"conbytes\": bytes,\n \"ConstrainedStr\": str,\n \"constr\": str,\n \"EmailStr\": str,\n \"PyObject\": None,\n \"ConstrainedInt\": int,\n \"conint\": int,\n \"PositiveInt\": int,\n \"NegativeInt\": int,\n \"ConstrainedFloat\": float,\n \"confloat\": float,\n \"PositiveFloat\": float,\n \"NegativeFloat\": float,\n \"ConstrainedDecimal\": Decimal,\n \"condecimal\": Decimal,\n \"UUID1\": UUID,\n \"UUID3\": UUID,\n \"UUID4\": UUID,\n \"UUID5\": UUID,\n \"FilePath\": None,\n \"DirectoryPath\": None,\n \"Json\": None,\n \"JsonWrapper\": None,\n \"SecretStr\": str,\n \"SecretBytes\": bytes,\n \"StrictBool\": bool,\n \"StrictInt\": int,\n \"StrictFloat\": float,\n \"PaymentCardNumber\": None,\n \"ByteSize\": None,\n \"AnyUrl\": str,\n \"AnyHttpUrl\": str,\n \"HttpUrl\": str,\n \"PostgresDsn\": str,\n \"RedisDsn\": str,\n}\n\nATTR_TO_TYPE_MAP_Pydantic_V2 = {\n \"EmailStr\": str,\n \"SecretStr\": str,\n \"SecretBytes\": bytes,\n \"AnyUrl\": str,\n}\n\nATTR_TO_TYPE_MAP_Pydantic_Core_V2 = {\n \"MultiHostUrl\": str,\n}\n\n\ndef get_fields_map_for_v2() -> Dict[Any, Any]:\n import pydantic_core\n\n fields_map = {\n getattr(pydantic, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP_Pydantic_V2.items()\n if hasattr(pydantic, field_name)\n }\n fields_map.update(\n {\n getattr(pydantic_core, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP_Pydantic_Core_V2.items()\n if hasattr(pydantic_core, field_name)\n }\n )\n\n return fields_map\n\n\nFIELDS_MAP = (\n {\n getattr(pydantic, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP.items()\n if hasattr(pydantic, field_name)\n }\n if IS_PYDANTIC_V1\n else get_fields_map_for_v2()\n)\n\n\ndef get_basic_type(type_: Any) -> Type[Any]:\n if IS_PYDANTIC_V1:\n # only pydantic v1 has these\n if lenient_issubclass(type_, 
pydantic.ConstrainedInt):\n return int\n if lenient_issubclass(type_, pydantic.ConstrainedFloat):\n return float\n if lenient_issubclass(type_, pydantic.ConstrainedStr):\n return str\n if lenient_issubclass(type_, pydantic.ConstrainedList):\n return List[get_basic_type(type_.item_type)] # type: ignore\n\n if type_ in FIELDS_MAP:\n type_ = FIELDS_MAP.get(type_)\n if type_ is None:\n raise UnsupportedTypeError()\n\n if is_new_type(type_):\n return new_type_supertype(type_)\n\n return type_\n\n\ndef replace_pydantic_types(type_: Any, is_input: bool) -> Any:\n if lenient_issubclass(type_, BaseModel):\n attr = \"_strawberry_input_type\" if is_input else \"_strawberry_type\"\n if hasattr(type_, attr):\n return getattr(type_, attr)\n else:\n raise UnregisteredTypeException(type_)\n return type_\n\n\ndef replace_types_recursively(type_: Any, is_input: bool) -> Any:\n \"\"\"Runs the conversions recursively into the arguments of generic types if any\"\"\"\n basic_type = get_basic_type(type_)\n replaced_type = replace_pydantic_types(basic_type, is_input)\n\n origin = get_origin(type_)\n if not origin or not hasattr(type_, \"__args__\"):\n return replaced_type\n\n converted = tuple(\n replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)\n )\n\n if isinstance(replaced_type, TypingGenericAlias):\n return TypingGenericAlias(origin, converted)\n if isinstance(replaced_type, TypingUnionType):\n return Union[converted]\n\n # TODO: investigate if we could move the check for annotated to the top\n if origin is Annotated and converted:\n converted = (converted[0],)\n\n replaced_type = replaced_type.copy_with(converted)\n\n if isinstance(replaced_type, StrawberryObjectDefinition):\n # TODO: Not sure if this is necessary. No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n replaced_type = builtins.type(\n replaced_type.name,\n (),\n {\"__strawberry_definition__\": replaced_type},\n )\n\n return replaced_type\n", "path": "strawberry/experimental/pydantic/fields.py"}]} | 3,147 | 260 |
gh_patches_debug_35352 | rasdani/github-patches | git_diff | zulip__zulip-23388 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Save failed: Only 2 custom profile fields can be displayed in the profile summary.
I tried to set a custom profile field to be displayed in the profile summary. However, it fails with the error message "Save failed: Only 2 custom profile fields can be displayed in the profile summary." even though no custom profile field has been set to be displayed.
https://user-images.githubusercontent.com/2891235/198561250-59519f45-85a4-4490-8f72-9fae343930be.mov
Server: hosted Zulip server
```console
Zulip Server
Version 6.0-dev-2021-gd461383c1f
Forked from upstream at 6.0-dev-2009-g0affc7ac6f
```
Client: Zulip Desktop v5.9.3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/views/custom_profile_fields.py`
Content:
```
1 from typing import List, Optional, cast
2
3 import orjson
4 from django.core.exceptions import ValidationError
5 from django.db import IntegrityError
6 from django.http import HttpRequest, HttpResponse
7 from django.utils.translation import gettext as _
8
9 from zerver.actions.custom_profile_fields import (
10 check_remove_custom_profile_field_value,
11 do_remove_realm_custom_profile_field,
12 do_update_user_custom_profile_data_if_changed,
13 try_add_realm_custom_profile_field,
14 try_add_realm_default_custom_profile_field,
15 try_reorder_realm_custom_profile_fields,
16 try_update_realm_custom_profile_field,
17 )
18 from zerver.decorator import human_users_only, require_realm_admin
19 from zerver.lib.exceptions import JsonableError
20 from zerver.lib.external_accounts import validate_external_account_field_data
21 from zerver.lib.request import REQ, has_request_variables
22 from zerver.lib.response import json_success
23 from zerver.lib.types import ProfileDataElementUpdateDict, ProfileFieldData, Validator
24 from zerver.lib.users import validate_user_custom_profile_data
25 from zerver.lib.validator import (
26 check_bool,
27 check_capped_string,
28 check_dict,
29 check_dict_only,
30 check_int,
31 check_list,
32 check_string,
33 check_union,
34 validate_select_field_data,
35 )
36 from zerver.models import CustomProfileField, UserProfile, custom_profile_fields_for_realm
37
38
39 def list_realm_custom_profile_fields(
40 request: HttpRequest, user_profile: UserProfile
41 ) -> HttpResponse:
42 fields = custom_profile_fields_for_realm(user_profile.realm_id)
43 return json_success(request, data={"custom_fields": [f.as_dict() for f in fields]})
44
45
46 hint_validator = check_capped_string(CustomProfileField.HINT_MAX_LENGTH)
47 name_validator = check_capped_string(CustomProfileField.NAME_MAX_LENGTH)
48
49
50 def validate_field_name_and_hint(name: str, hint: str) -> None:
51 if not name.strip():
52 raise JsonableError(_("Label cannot be blank."))
53
54 try:
55 hint_validator("hint", hint)
56 name_validator("name", name)
57 except ValidationError as error:
58 raise JsonableError(error.message)
59
60
61 def validate_custom_field_data(field_type: int, field_data: ProfileFieldData) -> None:
62 try:
63 if field_type == CustomProfileField.SELECT:
64 # Choice type field must have at least have one choice
65 if len(field_data) < 1:
66 raise JsonableError(_("Field must have at least one choice."))
67 validate_select_field_data(field_data)
68 elif field_type == CustomProfileField.EXTERNAL_ACCOUNT:
69 validate_external_account_field_data(field_data)
70 except ValidationError as error:
71 raise JsonableError(error.message)
72
73
74 def validate_display_in_profile_summary_field(
75 field_type: int, display_in_profile_summary: bool
76 ) -> None:
77 if not display_in_profile_summary:
78 return
79
80 # The LONG_TEXT field type doesn't make sense visually for profile
81 # field summaries. The USER field type will require some further
82 # client support.
83 if field_type == CustomProfileField.LONG_TEXT or field_type == CustomProfileField.USER:
84 raise JsonableError(_("Field type not supported for display in profile summary."))
85
86
87 def is_default_external_field(field_type: int, field_data: ProfileFieldData) -> bool:
88 if field_type != CustomProfileField.EXTERNAL_ACCOUNT:
89 return False
90 if field_data["subtype"] == "custom":
91 return False
92 return True
93
94
95 def validate_custom_profile_field(
96 name: str,
97 hint: str,
98 field_type: int,
99 field_data: ProfileFieldData,
100 display_in_profile_summary: bool,
101 ) -> None:
102 # Validate field data
103 validate_custom_field_data(field_type, field_data)
104
105 if not is_default_external_field(field_type, field_data):
106 # If field is default external field then we will fetch all data
107 # from our default field dictionary, so no need to validate name or hint
108 # Validate field name, hint if not default external account field
109 validate_field_name_and_hint(name, hint)
110
111 field_types = [i[0] for i in CustomProfileField.FIELD_TYPE_CHOICES]
112 if field_type not in field_types:
113 raise JsonableError(_("Invalid field type."))
114
115 validate_display_in_profile_summary_field(field_type, display_in_profile_summary)
116
117
118 check_profile_field_data: Validator[ProfileFieldData] = check_dict(
119 value_validator=check_union([check_dict(value_validator=check_string), check_string])
120 )
121
122
123 def update_only_display_in_profile_summary(
124 requested_name: str,
125 requested_hint: str,
126 requested_field_data: ProfileFieldData,
127 existing_field: CustomProfileField,
128 ) -> bool:
129 if (
130 requested_name != existing_field.name
131 or requested_hint != existing_field.hint
132 or requested_field_data != orjson.loads(existing_field.field_data)
133 ):
134 return False
135 return True
136
137
138 def display_in_profile_summary_limit_reached(profile_field_id: Optional[int] = None) -> bool:
139 query = CustomProfileField.objects.filter(display_in_profile_summary=True)
140 if profile_field_id is not None:
141 query = query.exclude(id=profile_field_id)
142 return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS
143
144
145 @require_realm_admin
146 @has_request_variables
147 def create_realm_custom_profile_field(
148 request: HttpRequest,
149 user_profile: UserProfile,
150 name: str = REQ(default="", converter=lambda var_name, x: x.strip()),
151 hint: str = REQ(default=""),
152 field_data: ProfileFieldData = REQ(default={}, json_validator=check_profile_field_data),
153 field_type: int = REQ(json_validator=check_int),
154 display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),
155 ) -> HttpResponse:
156 if display_in_profile_summary and display_in_profile_summary_limit_reached():
157 raise JsonableError(
158 _("Only 2 custom profile fields can be displayed in the profile summary.")
159 )
160
161 validate_custom_profile_field(name, hint, field_type, field_data, display_in_profile_summary)
162 try:
163 if is_default_external_field(field_type, field_data):
164 field_subtype = field_data["subtype"]
165 assert isinstance(field_subtype, str)
166 field = try_add_realm_default_custom_profile_field(
167 realm=user_profile.realm,
168 field_subtype=field_subtype,
169 display_in_profile_summary=display_in_profile_summary,
170 )
171 return json_success(request, data={"id": field.id})
172 else:
173 field = try_add_realm_custom_profile_field(
174 realm=user_profile.realm,
175 name=name,
176 field_data=field_data,
177 field_type=field_type,
178 hint=hint,
179 display_in_profile_summary=display_in_profile_summary,
180 )
181 return json_success(request, data={"id": field.id})
182 except IntegrityError:
183 raise JsonableError(_("A field with that label already exists."))
184
185
186 @require_realm_admin
187 def delete_realm_custom_profile_field(
188 request: HttpRequest, user_profile: UserProfile, field_id: int
189 ) -> HttpResponse:
190 try:
191 field = CustomProfileField.objects.get(id=field_id)
192 except CustomProfileField.DoesNotExist:
193 raise JsonableError(_("Field id {id} not found.").format(id=field_id))
194
195 do_remove_realm_custom_profile_field(realm=user_profile.realm, field=field)
196 return json_success(request)
197
198
199 @require_realm_admin
200 @has_request_variables
201 def update_realm_custom_profile_field(
202 request: HttpRequest,
203 user_profile: UserProfile,
204 field_id: int,
205 name: str = REQ(default="", converter=lambda var_name, x: x.strip()),
206 hint: str = REQ(default=""),
207 field_data: ProfileFieldData = REQ(default={}, json_validator=check_profile_field_data),
208 display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),
209 ) -> HttpResponse:
210 realm = user_profile.realm
211 try:
212 field = CustomProfileField.objects.get(realm=realm, id=field_id)
213 except CustomProfileField.DoesNotExist:
214 raise JsonableError(_("Field id {id} not found.").format(id=field_id))
215
216 if display_in_profile_summary and display_in_profile_summary_limit_reached(field.id):
217 raise JsonableError(
218 _("Only 2 custom profile fields can be displayed in the profile summary.")
219 )
220
221 if field.field_type == CustomProfileField.EXTERNAL_ACCOUNT:
222 # HACK: Allow changing the display_in_profile_summary property
223 # of default external account types, but not any others.
224 #
225 # TODO: Make the name/hint/field_data parameters optional, and
226 # just require that None was passed for all of them for this case.
227 if is_default_external_field(
228 field.field_type, orjson.loads(field.field_data)
229 ) and not update_only_display_in_profile_summary(name, hint, field_data, field):
230 raise JsonableError(_("Default custom field cannot be updated."))
231
232 validate_custom_profile_field(
233 name, hint, field.field_type, field_data, display_in_profile_summary
234 )
235 try:
236 try_update_realm_custom_profile_field(
237 realm,
238 field,
239 name,
240 hint=hint,
241 field_data=field_data,
242 display_in_profile_summary=display_in_profile_summary,
243 )
244 except IntegrityError:
245 raise JsonableError(_("A field with that label already exists."))
246 return json_success(request)
247
248
249 @require_realm_admin
250 @has_request_variables
251 def reorder_realm_custom_profile_fields(
252 request: HttpRequest,
253 user_profile: UserProfile,
254 order: List[int] = REQ(json_validator=check_list(check_int)),
255 ) -> HttpResponse:
256 try_reorder_realm_custom_profile_fields(user_profile.realm, order)
257 return json_success(request)
258
259
260 @human_users_only
261 @has_request_variables
262 def remove_user_custom_profile_data(
263 request: HttpRequest,
264 user_profile: UserProfile,
265 data: List[int] = REQ(json_validator=check_list(check_int)),
266 ) -> HttpResponse:
267 for field_id in data:
268 check_remove_custom_profile_field_value(user_profile, field_id)
269 return json_success(request)
270
271
272 check_profile_data_element_update_dict = cast(
273 Validator[ProfileDataElementUpdateDict],
274 check_dict_only(
275 [
276 ("id", check_int),
277 ("value", check_union([check_string, check_list(check_int)])),
278 ]
279 ),
280 )
281
282
283 @human_users_only
284 @has_request_variables
285 def update_user_custom_profile_data(
286 request: HttpRequest,
287 user_profile: UserProfile,
288 data: List[ProfileDataElementUpdateDict] = REQ(
289 json_validator=check_list(
290 check_profile_data_element_update_dict,
291 )
292 ),
293 ) -> HttpResponse:
294
295 validate_user_custom_profile_data(user_profile.realm.id, data)
296 do_update_user_custom_profile_data_if_changed(user_profile, data)
297 # We need to call this explicitly otherwise constraints are not check
298 return json_success(request)
299
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/views/custom_profile_fields.py b/zerver/views/custom_profile_fields.py
--- a/zerver/views/custom_profile_fields.py
+++ b/zerver/views/custom_profile_fields.py
@@ -33,7 +33,7 @@
check_union,
validate_select_field_data,
)
-from zerver.models import CustomProfileField, UserProfile, custom_profile_fields_for_realm
+from zerver.models import CustomProfileField, Realm, UserProfile, custom_profile_fields_for_realm
def list_realm_custom_profile_fields(
@@ -135,8 +135,10 @@
return True
-def display_in_profile_summary_limit_reached(profile_field_id: Optional[int] = None) -> bool:
- query = CustomProfileField.objects.filter(display_in_profile_summary=True)
+def display_in_profile_summary_limit_reached(
+ realm: Realm, profile_field_id: Optional[int] = None
+) -> bool:
+ query = CustomProfileField.objects.filter(realm=realm, display_in_profile_summary=True)
if profile_field_id is not None:
query = query.exclude(id=profile_field_id)
return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS
@@ -153,7 +155,7 @@
field_type: int = REQ(json_validator=check_int),
display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),
) -> HttpResponse:
- if display_in_profile_summary and display_in_profile_summary_limit_reached():
+ if display_in_profile_summary and display_in_profile_summary_limit_reached(user_profile.realm):
raise JsonableError(
_("Only 2 custom profile fields can be displayed in the profile summary.")
)
@@ -213,7 +215,9 @@
except CustomProfileField.DoesNotExist:
raise JsonableError(_("Field id {id} not found.").format(id=field_id))
- if display_in_profile_summary and display_in_profile_summary_limit_reached(field.id):
+ if display_in_profile_summary and display_in_profile_summary_limit_reached(
+ user_profile.realm, field.id
+ ):
raise JsonableError(
_("Only 2 custom profile fields can be displayed in the profile summary.")
)
| {"golden_diff": "diff --git a/zerver/views/custom_profile_fields.py b/zerver/views/custom_profile_fields.py\n--- a/zerver/views/custom_profile_fields.py\n+++ b/zerver/views/custom_profile_fields.py\n@@ -33,7 +33,7 @@\n check_union,\n validate_select_field_data,\n )\n-from zerver.models import CustomProfileField, UserProfile, custom_profile_fields_for_realm\n+from zerver.models import CustomProfileField, Realm, UserProfile, custom_profile_fields_for_realm\n \n \n def list_realm_custom_profile_fields(\n@@ -135,8 +135,10 @@\n return True\n \n \n-def display_in_profile_summary_limit_reached(profile_field_id: Optional[int] = None) -> bool:\n- query = CustomProfileField.objects.filter(display_in_profile_summary=True)\n+def display_in_profile_summary_limit_reached(\n+ realm: Realm, profile_field_id: Optional[int] = None\n+) -> bool:\n+ query = CustomProfileField.objects.filter(realm=realm, display_in_profile_summary=True)\n if profile_field_id is not None:\n query = query.exclude(id=profile_field_id)\n return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS\n@@ -153,7 +155,7 @@\n field_type: int = REQ(json_validator=check_int),\n display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),\n ) -> HttpResponse:\n- if display_in_profile_summary and display_in_profile_summary_limit_reached():\n+ if display_in_profile_summary and display_in_profile_summary_limit_reached(user_profile.realm):\n raise JsonableError(\n _(\"Only 2 custom profile fields can be displayed in the profile summary.\")\n )\n@@ -213,7 +215,9 @@\n except CustomProfileField.DoesNotExist:\n raise JsonableError(_(\"Field id {id} not found.\").format(id=field_id))\n \n- if display_in_profile_summary and display_in_profile_summary_limit_reached(field.id):\n+ if display_in_profile_summary and display_in_profile_summary_limit_reached(\n+ user_profile.realm, field.id\n+ ):\n raise JsonableError(\n _(\"Only 2 custom profile fields can be displayed in the profile summary.\")\n )\n", "issue": "Save failed: Only 2 custom profile fields can be displayed in the profile summary.\nI tried to set a custom profile field to be displayed in the profile summary. 
However, it fails with an error message \"Save failed: Only 2 custom profile fields can be displayed in the profile summary.\" even though no custom profile field has been set to displayed.\r\n\r\nhttps://user-images.githubusercontent.com/2891235/198561250-59519f45-85a4-4490-8f72-9fae343930be.mov\r\n\r\n\r\nServer: hosted Zulip server\r\n```console\r\nZulip Server\r\nVersion 6.0-dev-2021-gd461383c1f \r\nForked from upstream at 6.0-dev-2009-g0affc7ac6f\r\n```\r\n\r\nClient: Zulip Desktop v5.9.3\n", "before_files": [{"content": "from typing import List, Optional, cast\n\nimport orjson\nfrom django.core.exceptions import ValidationError\nfrom django.db import IntegrityError\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import gettext as _\n\nfrom zerver.actions.custom_profile_fields import (\n check_remove_custom_profile_field_value,\n do_remove_realm_custom_profile_field,\n do_update_user_custom_profile_data_if_changed,\n try_add_realm_custom_profile_field,\n try_add_realm_default_custom_profile_field,\n try_reorder_realm_custom_profile_fields,\n try_update_realm_custom_profile_field,\n)\nfrom zerver.decorator import human_users_only, require_realm_admin\nfrom zerver.lib.exceptions import JsonableError\nfrom zerver.lib.external_accounts import validate_external_account_field_data\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.types import ProfileDataElementUpdateDict, ProfileFieldData, Validator\nfrom zerver.lib.users import validate_user_custom_profile_data\nfrom zerver.lib.validator import (\n check_bool,\n check_capped_string,\n check_dict,\n check_dict_only,\n check_int,\n check_list,\n check_string,\n check_union,\n validate_select_field_data,\n)\nfrom zerver.models import CustomProfileField, UserProfile, custom_profile_fields_for_realm\n\n\ndef list_realm_custom_profile_fields(\n request: HttpRequest, user_profile: UserProfile\n) -> HttpResponse:\n fields = custom_profile_fields_for_realm(user_profile.realm_id)\n return json_success(request, data={\"custom_fields\": [f.as_dict() for f in fields]})\n\n\nhint_validator = check_capped_string(CustomProfileField.HINT_MAX_LENGTH)\nname_validator = check_capped_string(CustomProfileField.NAME_MAX_LENGTH)\n\n\ndef validate_field_name_and_hint(name: str, hint: str) -> None:\n if not name.strip():\n raise JsonableError(_(\"Label cannot be blank.\"))\n\n try:\n hint_validator(\"hint\", hint)\n name_validator(\"name\", name)\n except ValidationError as error:\n raise JsonableError(error.message)\n\n\ndef validate_custom_field_data(field_type: int, field_data: ProfileFieldData) -> None:\n try:\n if field_type == CustomProfileField.SELECT:\n # Choice type field must have at least have one choice\n if len(field_data) < 1:\n raise JsonableError(_(\"Field must have at least one choice.\"))\n validate_select_field_data(field_data)\n elif field_type == CustomProfileField.EXTERNAL_ACCOUNT:\n validate_external_account_field_data(field_data)\n except ValidationError as error:\n raise JsonableError(error.message)\n\n\ndef validate_display_in_profile_summary_field(\n field_type: int, display_in_profile_summary: bool\n) -> None:\n if not display_in_profile_summary:\n return\n\n # The LONG_TEXT field type doesn't make sense visually for profile\n # field summaries. 
The USER field type will require some further\n # client support.\n if field_type == CustomProfileField.LONG_TEXT or field_type == CustomProfileField.USER:\n raise JsonableError(_(\"Field type not supported for display in profile summary.\"))\n\n\ndef is_default_external_field(field_type: int, field_data: ProfileFieldData) -> bool:\n if field_type != CustomProfileField.EXTERNAL_ACCOUNT:\n return False\n if field_data[\"subtype\"] == \"custom\":\n return False\n return True\n\n\ndef validate_custom_profile_field(\n name: str,\n hint: str,\n field_type: int,\n field_data: ProfileFieldData,\n display_in_profile_summary: bool,\n) -> None:\n # Validate field data\n validate_custom_field_data(field_type, field_data)\n\n if not is_default_external_field(field_type, field_data):\n # If field is default external field then we will fetch all data\n # from our default field dictionary, so no need to validate name or hint\n # Validate field name, hint if not default external account field\n validate_field_name_and_hint(name, hint)\n\n field_types = [i[0] for i in CustomProfileField.FIELD_TYPE_CHOICES]\n if field_type not in field_types:\n raise JsonableError(_(\"Invalid field type.\"))\n\n validate_display_in_profile_summary_field(field_type, display_in_profile_summary)\n\n\ncheck_profile_field_data: Validator[ProfileFieldData] = check_dict(\n value_validator=check_union([check_dict(value_validator=check_string), check_string])\n)\n\n\ndef update_only_display_in_profile_summary(\n requested_name: str,\n requested_hint: str,\n requested_field_data: ProfileFieldData,\n existing_field: CustomProfileField,\n) -> bool:\n if (\n requested_name != existing_field.name\n or requested_hint != existing_field.hint\n or requested_field_data != orjson.loads(existing_field.field_data)\n ):\n return False\n return True\n\n\ndef display_in_profile_summary_limit_reached(profile_field_id: Optional[int] = None) -> bool:\n query = CustomProfileField.objects.filter(display_in_profile_summary=True)\n if profile_field_id is not None:\n query = query.exclude(id=profile_field_id)\n return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS\n\n\n@require_realm_admin\n@has_request_variables\ndef create_realm_custom_profile_field(\n request: HttpRequest,\n user_profile: UserProfile,\n name: str = REQ(default=\"\", converter=lambda var_name, x: x.strip()),\n hint: str = REQ(default=\"\"),\n field_data: ProfileFieldData = REQ(default={}, json_validator=check_profile_field_data),\n field_type: int = REQ(json_validator=check_int),\n display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),\n) -> HttpResponse:\n if display_in_profile_summary and display_in_profile_summary_limit_reached():\n raise JsonableError(\n _(\"Only 2 custom profile fields can be displayed in the profile summary.\")\n )\n\n validate_custom_profile_field(name, hint, field_type, field_data, display_in_profile_summary)\n try:\n if is_default_external_field(field_type, field_data):\n field_subtype = field_data[\"subtype\"]\n assert isinstance(field_subtype, str)\n field = try_add_realm_default_custom_profile_field(\n realm=user_profile.realm,\n field_subtype=field_subtype,\n display_in_profile_summary=display_in_profile_summary,\n )\n return json_success(request, data={\"id\": field.id})\n else:\n field = try_add_realm_custom_profile_field(\n realm=user_profile.realm,\n name=name,\n field_data=field_data,\n field_type=field_type,\n hint=hint,\n display_in_profile_summary=display_in_profile_summary,\n )\n return 
json_success(request, data={\"id\": field.id})\n except IntegrityError:\n raise JsonableError(_(\"A field with that label already exists.\"))\n\n\n@require_realm_admin\ndef delete_realm_custom_profile_field(\n request: HttpRequest, user_profile: UserProfile, field_id: int\n) -> HttpResponse:\n try:\n field = CustomProfileField.objects.get(id=field_id)\n except CustomProfileField.DoesNotExist:\n raise JsonableError(_(\"Field id {id} not found.\").format(id=field_id))\n\n do_remove_realm_custom_profile_field(realm=user_profile.realm, field=field)\n return json_success(request)\n\n\n@require_realm_admin\n@has_request_variables\ndef update_realm_custom_profile_field(\n request: HttpRequest,\n user_profile: UserProfile,\n field_id: int,\n name: str = REQ(default=\"\", converter=lambda var_name, x: x.strip()),\n hint: str = REQ(default=\"\"),\n field_data: ProfileFieldData = REQ(default={}, json_validator=check_profile_field_data),\n display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),\n) -> HttpResponse:\n realm = user_profile.realm\n try:\n field = CustomProfileField.objects.get(realm=realm, id=field_id)\n except CustomProfileField.DoesNotExist:\n raise JsonableError(_(\"Field id {id} not found.\").format(id=field_id))\n\n if display_in_profile_summary and display_in_profile_summary_limit_reached(field.id):\n raise JsonableError(\n _(\"Only 2 custom profile fields can be displayed in the profile summary.\")\n )\n\n if field.field_type == CustomProfileField.EXTERNAL_ACCOUNT:\n # HACK: Allow changing the display_in_profile_summary property\n # of default external account types, but not any others.\n #\n # TODO: Make the name/hint/field_data parameters optional, and\n # just require that None was passed for all of them for this case.\n if is_default_external_field(\n field.field_type, orjson.loads(field.field_data)\n ) and not update_only_display_in_profile_summary(name, hint, field_data, field):\n raise JsonableError(_(\"Default custom field cannot be updated.\"))\n\n validate_custom_profile_field(\n name, hint, field.field_type, field_data, display_in_profile_summary\n )\n try:\n try_update_realm_custom_profile_field(\n realm,\n field,\n name,\n hint=hint,\n field_data=field_data,\n display_in_profile_summary=display_in_profile_summary,\n )\n except IntegrityError:\n raise JsonableError(_(\"A field with that label already exists.\"))\n return json_success(request)\n\n\n@require_realm_admin\n@has_request_variables\ndef reorder_realm_custom_profile_fields(\n request: HttpRequest,\n user_profile: UserProfile,\n order: List[int] = REQ(json_validator=check_list(check_int)),\n) -> HttpResponse:\n try_reorder_realm_custom_profile_fields(user_profile.realm, order)\n return json_success(request)\n\n\n@human_users_only\n@has_request_variables\ndef remove_user_custom_profile_data(\n request: HttpRequest,\n user_profile: UserProfile,\n data: List[int] = REQ(json_validator=check_list(check_int)),\n) -> HttpResponse:\n for field_id in data:\n check_remove_custom_profile_field_value(user_profile, field_id)\n return json_success(request)\n\n\ncheck_profile_data_element_update_dict = cast(\n Validator[ProfileDataElementUpdateDict],\n check_dict_only(\n [\n (\"id\", check_int),\n (\"value\", check_union([check_string, check_list(check_int)])),\n ]\n ),\n)\n\n\n@human_users_only\n@has_request_variables\ndef update_user_custom_profile_data(\n request: HttpRequest,\n user_profile: UserProfile,\n data: List[ProfileDataElementUpdateDict] = REQ(\n json_validator=check_list(\n 
check_profile_data_element_update_dict,\n )\n ),\n) -> HttpResponse:\n\n validate_user_custom_profile_data(user_profile.realm.id, data)\n do_update_user_custom_profile_data_if_changed(user_profile, data)\n # We need to call this explicitly otherwise constraints are not check\n return json_success(request)\n", "path": "zerver/views/custom_profile_fields.py"}], "after_files": [{"content": "from typing import List, Optional, cast\n\nimport orjson\nfrom django.core.exceptions import ValidationError\nfrom django.db import IntegrityError\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import gettext as _\n\nfrom zerver.actions.custom_profile_fields import (\n check_remove_custom_profile_field_value,\n do_remove_realm_custom_profile_field,\n do_update_user_custom_profile_data_if_changed,\n try_add_realm_custom_profile_field,\n try_add_realm_default_custom_profile_field,\n try_reorder_realm_custom_profile_fields,\n try_update_realm_custom_profile_field,\n)\nfrom zerver.decorator import human_users_only, require_realm_admin\nfrom zerver.lib.exceptions import JsonableError\nfrom zerver.lib.external_accounts import validate_external_account_field_data\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.types import ProfileDataElementUpdateDict, ProfileFieldData, Validator\nfrom zerver.lib.users import validate_user_custom_profile_data\nfrom zerver.lib.validator import (\n check_bool,\n check_capped_string,\n check_dict,\n check_dict_only,\n check_int,\n check_list,\n check_string,\n check_union,\n validate_select_field_data,\n)\nfrom zerver.models import CustomProfileField, Realm, UserProfile, custom_profile_fields_for_realm\n\n\ndef list_realm_custom_profile_fields(\n request: HttpRequest, user_profile: UserProfile\n) -> HttpResponse:\n fields = custom_profile_fields_for_realm(user_profile.realm_id)\n return json_success(request, data={\"custom_fields\": [f.as_dict() for f in fields]})\n\n\nhint_validator = check_capped_string(CustomProfileField.HINT_MAX_LENGTH)\nname_validator = check_capped_string(CustomProfileField.NAME_MAX_LENGTH)\n\n\ndef validate_field_name_and_hint(name: str, hint: str) -> None:\n if not name.strip():\n raise JsonableError(_(\"Label cannot be blank.\"))\n\n try:\n hint_validator(\"hint\", hint)\n name_validator(\"name\", name)\n except ValidationError as error:\n raise JsonableError(error.message)\n\n\ndef validate_custom_field_data(field_type: int, field_data: ProfileFieldData) -> None:\n try:\n if field_type == CustomProfileField.SELECT:\n # Choice type field must have at least have one choice\n if len(field_data) < 1:\n raise JsonableError(_(\"Field must have at least one choice.\"))\n validate_select_field_data(field_data)\n elif field_type == CustomProfileField.EXTERNAL_ACCOUNT:\n validate_external_account_field_data(field_data)\n except ValidationError as error:\n raise JsonableError(error.message)\n\n\ndef validate_display_in_profile_summary_field(\n field_type: int, display_in_profile_summary: bool\n) -> None:\n if not display_in_profile_summary:\n return\n\n # The LONG_TEXT field type doesn't make sense visually for profile\n # field summaries. 
The USER field type will require some further\n # client support.\n if field_type == CustomProfileField.LONG_TEXT or field_type == CustomProfileField.USER:\n raise JsonableError(_(\"Field type not supported for display in profile summary.\"))\n\n\ndef is_default_external_field(field_type: int, field_data: ProfileFieldData) -> bool:\n if field_type != CustomProfileField.EXTERNAL_ACCOUNT:\n return False\n if field_data[\"subtype\"] == \"custom\":\n return False\n return True\n\n\ndef validate_custom_profile_field(\n name: str,\n hint: str,\n field_type: int,\n field_data: ProfileFieldData,\n display_in_profile_summary: bool,\n) -> None:\n # Validate field data\n validate_custom_field_data(field_type, field_data)\n\n if not is_default_external_field(field_type, field_data):\n # If field is default external field then we will fetch all data\n # from our default field dictionary, so no need to validate name or hint\n # Validate field name, hint if not default external account field\n validate_field_name_and_hint(name, hint)\n\n field_types = [i[0] for i in CustomProfileField.FIELD_TYPE_CHOICES]\n if field_type not in field_types:\n raise JsonableError(_(\"Invalid field type.\"))\n\n validate_display_in_profile_summary_field(field_type, display_in_profile_summary)\n\n\ncheck_profile_field_data: Validator[ProfileFieldData] = check_dict(\n value_validator=check_union([check_dict(value_validator=check_string), check_string])\n)\n\n\ndef update_only_display_in_profile_summary(\n requested_name: str,\n requested_hint: str,\n requested_field_data: ProfileFieldData,\n existing_field: CustomProfileField,\n) -> bool:\n if (\n requested_name != existing_field.name\n or requested_hint != existing_field.hint\n or requested_field_data != orjson.loads(existing_field.field_data)\n ):\n return False\n return True\n\n\ndef display_in_profile_summary_limit_reached(\n realm: Realm, profile_field_id: Optional[int] = None\n) -> bool:\n query = CustomProfileField.objects.filter(realm=realm, display_in_profile_summary=True)\n if profile_field_id is not None:\n query = query.exclude(id=profile_field_id)\n return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS\n\n\n@require_realm_admin\n@has_request_variables\ndef create_realm_custom_profile_field(\n request: HttpRequest,\n user_profile: UserProfile,\n name: str = REQ(default=\"\", converter=lambda var_name, x: x.strip()),\n hint: str = REQ(default=\"\"),\n field_data: ProfileFieldData = REQ(default={}, json_validator=check_profile_field_data),\n field_type: int = REQ(json_validator=check_int),\n display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),\n) -> HttpResponse:\n if display_in_profile_summary and display_in_profile_summary_limit_reached(user_profile.realm):\n raise JsonableError(\n _(\"Only 2 custom profile fields can be displayed in the profile summary.\")\n )\n\n validate_custom_profile_field(name, hint, field_type, field_data, display_in_profile_summary)\n try:\n if is_default_external_field(field_type, field_data):\n field_subtype = field_data[\"subtype\"]\n assert isinstance(field_subtype, str)\n field = try_add_realm_default_custom_profile_field(\n realm=user_profile.realm,\n field_subtype=field_subtype,\n display_in_profile_summary=display_in_profile_summary,\n )\n return json_success(request, data={\"id\": field.id})\n else:\n field = try_add_realm_custom_profile_field(\n realm=user_profile.realm,\n name=name,\n field_data=field_data,\n field_type=field_type,\n hint=hint,\n 
display_in_profile_summary=display_in_profile_summary,\n )\n return json_success(request, data={\"id\": field.id})\n except IntegrityError:\n raise JsonableError(_(\"A field with that label already exists.\"))\n\n\n@require_realm_admin\ndef delete_realm_custom_profile_field(\n request: HttpRequest, user_profile: UserProfile, field_id: int\n) -> HttpResponse:\n try:\n field = CustomProfileField.objects.get(id=field_id)\n except CustomProfileField.DoesNotExist:\n raise JsonableError(_(\"Field id {id} not found.\").format(id=field_id))\n\n do_remove_realm_custom_profile_field(realm=user_profile.realm, field=field)\n return json_success(request)\n\n\n@require_realm_admin\n@has_request_variables\ndef update_realm_custom_profile_field(\n request: HttpRequest,\n user_profile: UserProfile,\n field_id: int,\n name: str = REQ(default=\"\", converter=lambda var_name, x: x.strip()),\n hint: str = REQ(default=\"\"),\n field_data: ProfileFieldData = REQ(default={}, json_validator=check_profile_field_data),\n display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),\n) -> HttpResponse:\n realm = user_profile.realm\n try:\n field = CustomProfileField.objects.get(realm=realm, id=field_id)\n except CustomProfileField.DoesNotExist:\n raise JsonableError(_(\"Field id {id} not found.\").format(id=field_id))\n\n if display_in_profile_summary and display_in_profile_summary_limit_reached(\n user_profile.realm, field.id\n ):\n raise JsonableError(\n _(\"Only 2 custom profile fields can be displayed in the profile summary.\")\n )\n\n if field.field_type == CustomProfileField.EXTERNAL_ACCOUNT:\n # HACK: Allow changing the display_in_profile_summary property\n # of default external account types, but not any others.\n #\n # TODO: Make the name/hint/field_data parameters optional, and\n # just require that None was passed for all of them for this case.\n if is_default_external_field(\n field.field_type, orjson.loads(field.field_data)\n ) and not update_only_display_in_profile_summary(name, hint, field_data, field):\n raise JsonableError(_(\"Default custom field cannot be updated.\"))\n\n validate_custom_profile_field(\n name, hint, field.field_type, field_data, display_in_profile_summary\n )\n try:\n try_update_realm_custom_profile_field(\n realm,\n field,\n name,\n hint=hint,\n field_data=field_data,\n display_in_profile_summary=display_in_profile_summary,\n )\n except IntegrityError:\n raise JsonableError(_(\"A field with that label already exists.\"))\n return json_success(request)\n\n\n@require_realm_admin\n@has_request_variables\ndef reorder_realm_custom_profile_fields(\n request: HttpRequest,\n user_profile: UserProfile,\n order: List[int] = REQ(json_validator=check_list(check_int)),\n) -> HttpResponse:\n try_reorder_realm_custom_profile_fields(user_profile.realm, order)\n return json_success(request)\n\n\n@human_users_only\n@has_request_variables\ndef remove_user_custom_profile_data(\n request: HttpRequest,\n user_profile: UserProfile,\n data: List[int] = REQ(json_validator=check_list(check_int)),\n) -> HttpResponse:\n for field_id in data:\n check_remove_custom_profile_field_value(user_profile, field_id)\n return json_success(request)\n\n\ncheck_profile_data_element_update_dict = cast(\n Validator[ProfileDataElementUpdateDict],\n check_dict_only(\n [\n (\"id\", check_int),\n (\"value\", check_union([check_string, check_list(check_int)])),\n ]\n ),\n)\n\n\n@human_users_only\n@has_request_variables\ndef update_user_custom_profile_data(\n request: HttpRequest,\n user_profile: 
UserProfile,\n data: List[ProfileDataElementUpdateDict] = REQ(\n json_validator=check_list(\n check_profile_data_element_update_dict,\n )\n ),\n) -> HttpResponse:\n\n validate_user_custom_profile_data(user_profile.realm.id, data)\n do_update_user_custom_profile_data_if_changed(user_profile, data)\n # We need to call this explicitly otherwise constraints are not check\n return json_success(request)\n", "path": "zerver/views/custom_profile_fields.py"}]} | 3,563 | 474 |
gh_patches_debug_13361 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-5230 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MacOS onefile package, GUI does not display in High Resolution mode by default. Now it's 2019, so I suggest making the 'NSHighResolutionCapable' option a default.
Hi dear developers,
I found a small issue on macOS. After running the command 'pyinstaller --onefile --noconsole myscript.py', two files are generated: a Unix executable and a macOS app package.
My program uses Tkinter for its GUI. When the program is launched from the Unix executable, it displays in high resolution mode like a normal macOS app, but when launched from the app package it displays in a low resolution mode, which looks really strange on today's Macs.
After a little searching, I found this topic on Stack Overflow, https://stackoverflow.com/questions/40484659/pyinstaller-tkinter-window-low-resolution-in-app-bundle-but-not-in-app-program , and added the config it suggests:
app = BUNDLE(exe,
             name='myscript.app',
             icon=None,
             bundle_identifier=None,
             info_plist={
                 'NSHighResolutionCapable': 'True'
             },
             )
and my program launched from the app package now displays just fine.
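A quick way to double-check that the key actually ends up in the generated bundle (assuming PyInstaller's default dist/ output folder and the app name above) is to read the Info.plist back with plistlib:

```
import plistlib

# Path assumes PyInstaller's default dist/ layout and the app name used above.
with open("dist/myscript.app/Contents/Info.plist", "rb") as fh:
    info = plistlib.load(fh)

# Prints the value written via info_plist, e.g. 'True'.
print(info.get("NSHighResolutionCapable"))
```

If this prints None, the spec file change did not make it into the rebuilt bundle.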
So it all comes down to the **NSHighResolutionCapable** option being disabled or not set by default.
Though my problem is solved, the reason I still want to open this issue is that **now it's 2019, and in the Mac world Retina or high resolution is already the mainstream:** almost all Mac products are equipped with a Retina display, and people buy Macs for their crisp displays.
Even if you are using an external display, 4K is now the mainstream; there's no reason to hook up a low resolution display to a Mac unless you already have one and still want to use it.
So I suggest making this option a DEFAULT in the macOS version: since the command line executable displays in high resolution mode, there's no reason the app package should not, and high resolution is what most folks expect.
Make high resolution a default, no more tweaks, no more searches.
Consider this!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/building/osx.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2020, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 import os
13 import plistlib
14 import shutil
15 from ..compat import is_darwin
16 from .api import EXE, COLLECT
17 from .datastruct import Target, TOC, logger
18 from .utils import _check_path_overlap, _rmtree, add_suffix_to_extensions, checkCache
19
20
21
22 class BUNDLE(Target):
23 def __init__(self, *args, **kws):
24 from ..config import CONF
25
26 # BUNDLE only has a sense under Mac OS X, it's a noop on other platforms
27 if not is_darwin:
28 return
29
30 # get a path to a .icns icon for the app bundle.
31 self.icon = kws.get('icon')
32 if not self.icon:
33 # --icon not specified; use the default in the pyinstaller folder
34 self.icon = os.path.join(os.path.dirname(os.path.dirname(__file__)),
35 'bootloader', 'images', 'icon-windowed.icns')
36 else:
37 # user gave an --icon=path. If it is relative, make it
38 # relative to the spec file location.
39 if not os.path.isabs(self.icon):
40 self.icon = os.path.join(CONF['specpath'], self.icon)
41 # ensure icon path is absolute
42 self.icon = os.path.abspath(self.icon)
43
44 Target.__init__(self)
45
46 # .app bundle is created in DISTPATH.
47 self.name = kws.get('name', None)
48 base_name = os.path.basename(self.name)
49 self.name = os.path.join(CONF['distpath'], base_name)
50
51 self.appname = os.path.splitext(base_name)[0]
52 self.version = kws.get("version", "0.0.0")
53 self.toc = TOC()
54 self.strip = False
55 self.upx = False
56 self.console = True
57
58 # .app bundle identifier for Code Signing
59 self.bundle_identifier = kws.get('bundle_identifier')
60 if not self.bundle_identifier:
61 # Fallback to appname.
62 self.bundle_identifier = self.appname
63
64 self.info_plist = kws.get('info_plist', None)
65
66 for arg in args:
67 if isinstance(arg, EXE):
68 self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))
69 self.toc.extend(arg.dependencies)
70 self.strip = arg.strip
71 self.upx = arg.upx
72 self.upx_exclude = arg.upx_exclude
73 self.console = arg.console
74 elif isinstance(arg, TOC):
75 self.toc.extend(arg)
76 # TOC doesn't have a strip or upx attribute, so there is no way for us to
77 # tell which cache we should draw from.
78 elif isinstance(arg, COLLECT):
79 self.toc.extend(arg.toc)
80 self.strip = arg.strip_binaries
81 self.upx = arg.upx_binaries
82 self.upx_exclude = arg.upx_exclude
83 self.console = arg.console
84 else:
85 logger.info("unsupported entry %s", arg.__class__.__name__)
86 # Now, find values for app filepath (name), app name (appname), and name
87 # of the actual executable (exename) from the first EXECUTABLE item in
88 # toc, which might have come from a COLLECT too (not from an EXE).
89 for inm, name, typ in self.toc:
90 if typ == "EXECUTABLE":
91 self.exename = name
92 if self.name is None:
93 self.appname = "Mac%s" % (os.path.splitext(inm)[0],)
94 self.name = os.path.join(CONF['specpath'], self.appname + ".app")
95 else:
96 self.name = os.path.join(CONF['specpath'], self.name)
97 break
98 self.__postinit__()
99
100 _GUTS = (
101 # BUNDLE always builds, just want the toc to be written out
102 ('toc', None),
103 )
104
105 def _check_guts(self, data, last_build):
106 # BUNDLE always needs to be executed, since it will clean the output
107 # directory anyway to make sure there is no existing cruft accumulating
108 return 1
109
110 def assemble(self):
111 if _check_path_overlap(self.name) and os.path.isdir(self.name):
112 _rmtree(self.name)
113 logger.info("Building BUNDLE %s", self.tocbasename)
114
115 # Create a minimal Mac bundle structure
116 os.makedirs(os.path.join(self.name, "Contents", "MacOS"))
117 os.makedirs(os.path.join(self.name, "Contents", "Resources"))
118 os.makedirs(os.path.join(self.name, "Contents", "Frameworks"))
119
120 # Copy icns icon to Resources directory.
121 if os.path.exists(self.icon):
122 shutil.copy(self.icon, os.path.join(self.name, 'Contents', 'Resources'))
123 else:
124 logger.warning("icon not found %s", self.icon)
125
126 # Key/values for a minimal Info.plist file
127 info_plist_dict = {"CFBundleDisplayName": self.appname,
128 "CFBundleName": self.appname,
129
130 # Required by 'codesign' utility.
131 # The value for CFBundleIdentifier is used as the default unique
132 # name of your program for Code Signing purposes.
133 # It even identifies the APP for access to restricted OS X areas
134 # like Keychain.
135 #
136 # The identifier used for signing must be globally unique. The usal
137 # form for this identifier is a hierarchical name in reverse DNS
138 # notation, starting with the toplevel domain, followed by the
139 # company name, followed by the department within the company, and
140 # ending with the product name. Usually in the form:
141 # com.mycompany.department.appname
142 # Cli option --osx-bundle-identifier sets this value.
143 "CFBundleIdentifier": self.bundle_identifier,
144
145 # Fix for #156 - 'MacOS' must be in the name - not sure why
146 "CFBundleExecutable": 'MacOS/%s' % os.path.basename(self.exename),
147 "CFBundleIconFile": os.path.basename(self.icon),
148 "CFBundleInfoDictionaryVersion": "6.0",
149 "CFBundlePackageType": "APPL",
150 "CFBundleShortVersionString": self.version,
151
152 }
153
154 # Setting EXE console=True implies LSBackgroundOnly=True.
155 # But it still can be overwrite by the user.
156 if self.console:
157 info_plist_dict['LSBackgroundOnly'] = True
158
159 # Merge info_plist settings from spec file
160 if isinstance(self.info_plist, dict) and self.info_plist:
161 info_plist_dict.update(self.info_plist)
162
163 plist_filename = os.path.join(self.name, "Contents", "Info.plist")
164 with open(plist_filename, "wb") as plist_fh:
165 plistlib.dump(info_plist_dict, plist_fh)
166
167 links = []
168 toc = add_suffix_to_extensions(self.toc)
169 for inm, fnm, typ in toc:
170 # Copy files from cache. This ensures that are used files with relative
171 # paths to dynamic library dependencies (@executable_path)
172 base_path = inm.split('/', 1)[0]
173 if typ in ('EXTENSION', 'BINARY'):
174 fnm = checkCache(fnm, strip=self.strip, upx=self.upx,
175 upx_exclude=self.upx_exclude, dist_nm=inm)
176 # Add most data files to a list for symlinking later.
177 if typ == 'DATA' and base_path not in ('base_library.zip', 'PySide2', 'PyQt5'):
178 links.append((inm, fnm))
179 else:
180 tofnm = os.path.join(self.name, "Contents", "MacOS", inm)
181 todir = os.path.dirname(tofnm)
182 if not os.path.exists(todir):
183 os.makedirs(todir)
184 if os.path.isdir(fnm):
185 # beacuse shutil.copy2() is the default copy function
186 # for shutil.copytree, this will also copy file metadata
187 shutil.copytree(fnm, tofnm)
188 else:
189 shutil.copy(fnm, tofnm)
190
191 logger.info('moving BUNDLE data files to Resource directory')
192
193 # Mac OS X Code Signing does not work when .app bundle contains
194 # data files in dir ./Contents/MacOS.
195 #
196 # Put all data files in ./Resources and create symlinks in ./MacOS.
197 bin_dir = os.path.join(self.name, 'Contents', 'MacOS')
198 res_dir = os.path.join(self.name, 'Contents', 'Resources')
199 for inm, fnm in links:
200 tofnm = os.path.join(res_dir, inm)
201 todir = os.path.dirname(tofnm)
202 if not os.path.exists(todir):
203 os.makedirs(todir)
204 if os.path.isdir(fnm):
205 # beacuse shutil.copy2() is the default copy function
206 # for shutil.copytree, this will also copy file metadata
207 shutil.copytree(fnm, tofnm)
208 else:
209 shutil.copy(fnm, tofnm)
210 base_path = os.path.split(inm)[0]
211 if base_path:
212 if not os.path.exists(os.path.join(bin_dir, inm)):
213 path = ''
214 for part in iter(base_path.split(os.path.sep)):
215 # Build path from previous path and the next part of the base path
216 path = os.path.join(path, part)
217 try:
218 relative_source_path = os.path.relpath(os.path.join(res_dir, path),
219 os.path.split(os.path.join(bin_dir, path))[0])
220 dest_path = os.path.join(bin_dir, path)
221 os.symlink(relative_source_path, dest_path)
222 break
223 except FileExistsError:
224 pass
225 if not os.path.exists(os.path.join(bin_dir, inm)):
226 relative_source_path = os.path.relpath(os.path.join(res_dir, inm),
227 os.path.split(os.path.join(bin_dir, inm))[0])
228 dest_path = os.path.join(bin_dir, inm)
229 os.symlink(relative_source_path, dest_path)
230 else: # If path is empty, e.g., a top level file, try to just symlink the file
231 os.symlink(os.path.relpath(os.path.join(res_dir, inm),
232 os.path.split(os.path.join(bin_dir, inm))[0]),
233 os.path.join(bin_dir, inm))
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/building/osx.py b/PyInstaller/building/osx.py
--- a/PyInstaller/building/osx.py
+++ b/PyInstaller/building/osx.py
@@ -151,10 +151,14 @@
}
- # Setting EXE console=True implies LSBackgroundOnly=True.
- # But it still can be overwrite by the user.
+ # Set some default values.
+ # But they still can be overwritten by the user.
if self.console:
+ # Setting EXE console=True implies LSBackgroundOnly=True.
info_plist_dict['LSBackgroundOnly'] = True
+ else:
+ # Let's use high resolution by default.
+ info_plist_dict['NSHighResolutionCapable'] = True
# Merge info_plist settings from spec file
if isinstance(self.info_plist, dict) and self.info_plist:
| {"golden_diff": "diff --git a/PyInstaller/building/osx.py b/PyInstaller/building/osx.py\n--- a/PyInstaller/building/osx.py\n+++ b/PyInstaller/building/osx.py\n@@ -151,10 +151,14 @@\n \n }\n \n- # Setting EXE console=True implies LSBackgroundOnly=True.\n- # But it still can be overwrite by the user.\n+ # Set some default values.\n+ # But they still can be overwritten by the user.\n if self.console:\n+ # Setting EXE console=True implies LSBackgroundOnly=True.\n info_plist_dict['LSBackgroundOnly'] = True\n+ else:\n+ # Let's use high resolution by default.\n+ info_plist_dict['NSHighResolutionCapable'] = True\n \n # Merge info_plist settings from spec file\n if isinstance(self.info_plist, dict) and self.info_plist:\n", "issue": "MacOS onefile package, GUI does not display in High Resolution mode by default. Now it's 2019, so I suggest make the 'NSHighResolutionCapable' option a default.\nHi dear developers,\r\n\r\nI found a small issue on mac os x, after running the command 'pyinstaller --onefile --noconsole myscript.py', there are 2 generated files, one is a Unix executable, the other one is a mac os APP PACKAGE. \r\n\r\nMy program uses Tkinter for GUI, When the program is launched from the unix executable, it displays in the high resolution mode like a normal mac os app. But when launched from the APP Package, it will display in a low resolution mode, which looks really strange on today's Mac.\r\n\r\nAfter a little search, I found this topic in Stackoverflow, https://stackoverflow.com/questions/40484659/pyinstaller-tkinter-window-low-resolution-in-app-bundle-but-not-in-app-program , added some configs as this topic suggests:\r\n\r\napp = BUNDLE(exe,\r\n name='myscript.app',\r\n icon=None,\r\n bundle_identifier=None\r\n **info_plist={\r\n 'NSHighResolutionCapable': 'True'\r\n },**\r\n )\r\n\r\nand my program launched from package file now just displayed fine.\r\nSo it's all about the **NSHighResolutionCapable** option is disabled or not set by default.\r\n\r\n\r\nThough my problem is solved, the reason why I still want to open this Issue is, **now it's 2019, in the MacWorld, Retina or High Resolution is already the mainstream,** almost all the Mac products are equipped with a Retina display, and people buy mac for its crisp display. \r\nEven if you are using an external display, now the 4k is the mainstream, there's no reason to hook a low resolution display especially on a Mac except you already have one and want to still use it.\r\n\r\nSo I suggest, make this option a DEFAULT in Mac OS version, since the command line executable displays in a high res mode, there's no reason why the App Package not. And high resolution is always what most folks expected. 
\r\n\r\nMake high resolution a default, no more tweaks, no more searches.\r\n\r\nConsider this!\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport os\nimport plistlib\nimport shutil\nfrom ..compat import is_darwin\nfrom .api import EXE, COLLECT\nfrom .datastruct import Target, TOC, logger\nfrom .utils import _check_path_overlap, _rmtree, add_suffix_to_extensions, checkCache\n\n\n\nclass BUNDLE(Target):\n def __init__(self, *args, **kws):\n from ..config import CONF\n\n # BUNDLE only has a sense under Mac OS X, it's a noop on other platforms\n if not is_darwin:\n return\n\n # get a path to a .icns icon for the app bundle.\n self.icon = kws.get('icon')\n if not self.icon:\n # --icon not specified; use the default in the pyinstaller folder\n self.icon = os.path.join(os.path.dirname(os.path.dirname(__file__)),\n 'bootloader', 'images', 'icon-windowed.icns')\n else:\n # user gave an --icon=path. If it is relative, make it\n # relative to the spec file location.\n if not os.path.isabs(self.icon):\n self.icon = os.path.join(CONF['specpath'], self.icon)\n # ensure icon path is absolute\n self.icon = os.path.abspath(self.icon)\n\n Target.__init__(self)\n\n # .app bundle is created in DISTPATH.\n self.name = kws.get('name', None)\n base_name = os.path.basename(self.name)\n self.name = os.path.join(CONF['distpath'], base_name)\n\n self.appname = os.path.splitext(base_name)[0]\n self.version = kws.get(\"version\", \"0.0.0\")\n self.toc = TOC()\n self.strip = False\n self.upx = False\n self.console = True\n\n # .app bundle identifier for Code Signing\n self.bundle_identifier = kws.get('bundle_identifier')\n if not self.bundle_identifier:\n # Fallback to appname.\n self.bundle_identifier = self.appname\n\n self.info_plist = kws.get('info_plist', None)\n\n for arg in args:\n if isinstance(arg, EXE):\n self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))\n self.toc.extend(arg.dependencies)\n self.strip = arg.strip\n self.upx = arg.upx\n self.upx_exclude = arg.upx_exclude\n self.console = arg.console\n elif isinstance(arg, TOC):\n self.toc.extend(arg)\n # TOC doesn't have a strip or upx attribute, so there is no way for us to\n # tell which cache we should draw from.\n elif isinstance(arg, COLLECT):\n self.toc.extend(arg.toc)\n self.strip = arg.strip_binaries\n self.upx = arg.upx_binaries\n self.upx_exclude = arg.upx_exclude\n self.console = arg.console\n else:\n logger.info(\"unsupported entry %s\", arg.__class__.__name__)\n # Now, find values for app filepath (name), app name (appname), and name\n # of the actual executable (exename) from the first EXECUTABLE item in\n # toc, which might have come from a COLLECT too (not from an EXE).\n for inm, name, typ in self.toc:\n if typ == \"EXECUTABLE\":\n self.exename = name\n if self.name is None:\n self.appname = \"Mac%s\" % (os.path.splitext(inm)[0],)\n self.name = os.path.join(CONF['specpath'], self.appname + \".app\")\n else:\n self.name = os.path.join(CONF['specpath'], self.name)\n break\n self.__postinit__()\n\n _GUTS = (\n # BUNDLE always 
builds, just want the toc to be written out\n ('toc', None),\n )\n\n def _check_guts(self, data, last_build):\n # BUNDLE always needs to be executed, since it will clean the output\n # directory anyway to make sure there is no existing cruft accumulating\n return 1\n\n def assemble(self):\n if _check_path_overlap(self.name) and os.path.isdir(self.name):\n _rmtree(self.name)\n logger.info(\"Building BUNDLE %s\", self.tocbasename)\n\n # Create a minimal Mac bundle structure\n os.makedirs(os.path.join(self.name, \"Contents\", \"MacOS\"))\n os.makedirs(os.path.join(self.name, \"Contents\", \"Resources\"))\n os.makedirs(os.path.join(self.name, \"Contents\", \"Frameworks\"))\n\n # Copy icns icon to Resources directory.\n if os.path.exists(self.icon):\n shutil.copy(self.icon, os.path.join(self.name, 'Contents', 'Resources'))\n else:\n logger.warning(\"icon not found %s\", self.icon)\n\n # Key/values for a minimal Info.plist file\n info_plist_dict = {\"CFBundleDisplayName\": self.appname,\n \"CFBundleName\": self.appname,\n\n # Required by 'codesign' utility.\n # The value for CFBundleIdentifier is used as the default unique\n # name of your program for Code Signing purposes.\n # It even identifies the APP for access to restricted OS X areas\n # like Keychain.\n #\n # The identifier used for signing must be globally unique. The usal\n # form for this identifier is a hierarchical name in reverse DNS\n # notation, starting with the toplevel domain, followed by the\n # company name, followed by the department within the company, and\n # ending with the product name. Usually in the form:\n # com.mycompany.department.appname\n # Cli option --osx-bundle-identifier sets this value.\n \"CFBundleIdentifier\": self.bundle_identifier,\n\n # Fix for #156 - 'MacOS' must be in the name - not sure why\n \"CFBundleExecutable\": 'MacOS/%s' % os.path.basename(self.exename),\n \"CFBundleIconFile\": os.path.basename(self.icon),\n \"CFBundleInfoDictionaryVersion\": \"6.0\",\n \"CFBundlePackageType\": \"APPL\",\n \"CFBundleShortVersionString\": self.version,\n\n }\n\n # Setting EXE console=True implies LSBackgroundOnly=True.\n # But it still can be overwrite by the user.\n if self.console:\n info_plist_dict['LSBackgroundOnly'] = True\n\n # Merge info_plist settings from spec file\n if isinstance(self.info_plist, dict) and self.info_plist:\n info_plist_dict.update(self.info_plist)\n\n plist_filename = os.path.join(self.name, \"Contents\", \"Info.plist\")\n with open(plist_filename, \"wb\") as plist_fh:\n plistlib.dump(info_plist_dict, plist_fh)\n\n links = []\n toc = add_suffix_to_extensions(self.toc)\n for inm, fnm, typ in toc:\n # Copy files from cache. 
This ensures that are used files with relative\n # paths to dynamic library dependencies (@executable_path)\n base_path = inm.split('/', 1)[0]\n if typ in ('EXTENSION', 'BINARY'):\n fnm = checkCache(fnm, strip=self.strip, upx=self.upx,\n upx_exclude=self.upx_exclude, dist_nm=inm)\n # Add most data files to a list for symlinking later.\n if typ == 'DATA' and base_path not in ('base_library.zip', 'PySide2', 'PyQt5'):\n links.append((inm, fnm))\n else:\n tofnm = os.path.join(self.name, \"Contents\", \"MacOS\", inm)\n todir = os.path.dirname(tofnm)\n if not os.path.exists(todir):\n os.makedirs(todir)\n if os.path.isdir(fnm):\n # beacuse shutil.copy2() is the default copy function\n # for shutil.copytree, this will also copy file metadata\n shutil.copytree(fnm, tofnm)\n else:\n shutil.copy(fnm, tofnm)\n\n logger.info('moving BUNDLE data files to Resource directory')\n\n # Mac OS X Code Signing does not work when .app bundle contains\n # data files in dir ./Contents/MacOS.\n #\n # Put all data files in ./Resources and create symlinks in ./MacOS.\n bin_dir = os.path.join(self.name, 'Contents', 'MacOS')\n res_dir = os.path.join(self.name, 'Contents', 'Resources')\n for inm, fnm in links:\n tofnm = os.path.join(res_dir, inm)\n todir = os.path.dirname(tofnm)\n if not os.path.exists(todir):\n os.makedirs(todir)\n if os.path.isdir(fnm):\n # beacuse shutil.copy2() is the default copy function\n # for shutil.copytree, this will also copy file metadata\n shutil.copytree(fnm, tofnm)\n else:\n shutil.copy(fnm, tofnm)\n base_path = os.path.split(inm)[0]\n if base_path:\n if not os.path.exists(os.path.join(bin_dir, inm)):\n path = ''\n for part in iter(base_path.split(os.path.sep)):\n # Build path from previous path and the next part of the base path\n path = os.path.join(path, part)\n try:\n relative_source_path = os.path.relpath(os.path.join(res_dir, path),\n os.path.split(os.path.join(bin_dir, path))[0])\n dest_path = os.path.join(bin_dir, path)\n os.symlink(relative_source_path, dest_path)\n break\n except FileExistsError:\n pass\n if not os.path.exists(os.path.join(bin_dir, inm)):\n relative_source_path = os.path.relpath(os.path.join(res_dir, inm),\n os.path.split(os.path.join(bin_dir, inm))[0])\n dest_path = os.path.join(bin_dir, inm)\n os.symlink(relative_source_path, dest_path)\n else: # If path is empty, e.g., a top level file, try to just symlink the file\n os.symlink(os.path.relpath(os.path.join(res_dir, inm),\n os.path.split(os.path.join(bin_dir, inm))[0]),\n os.path.join(bin_dir, inm))\n", "path": "PyInstaller/building/osx.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport os\nimport plistlib\nimport shutil\nfrom ..compat import is_darwin\nfrom .api import EXE, COLLECT\nfrom .datastruct import Target, TOC, logger\nfrom .utils import _check_path_overlap, _rmtree, add_suffix_to_extensions, checkCache\n\n\n\nclass BUNDLE(Target):\n def __init__(self, *args, **kws):\n from ..config import CONF\n\n # BUNDLE only has a sense under Mac OS X, it's a noop on other platforms\n if not is_darwin:\n 
return\n\n # get a path to a .icns icon for the app bundle.\n self.icon = kws.get('icon')\n if not self.icon:\n # --icon not specified; use the default in the pyinstaller folder\n self.icon = os.path.join(os.path.dirname(os.path.dirname(__file__)),\n 'bootloader', 'images', 'icon-windowed.icns')\n else:\n # user gave an --icon=path. If it is relative, make it\n # relative to the spec file location.\n if not os.path.isabs(self.icon):\n self.icon = os.path.join(CONF['specpath'], self.icon)\n # ensure icon path is absolute\n self.icon = os.path.abspath(self.icon)\n\n Target.__init__(self)\n\n # .app bundle is created in DISTPATH.\n self.name = kws.get('name', None)\n base_name = os.path.basename(self.name)\n self.name = os.path.join(CONF['distpath'], base_name)\n\n self.appname = os.path.splitext(base_name)[0]\n self.version = kws.get(\"version\", \"0.0.0\")\n self.toc = TOC()\n self.strip = False\n self.upx = False\n self.console = True\n\n # .app bundle identifier for Code Signing\n self.bundle_identifier = kws.get('bundle_identifier')\n if not self.bundle_identifier:\n # Fallback to appname.\n self.bundle_identifier = self.appname\n\n self.info_plist = kws.get('info_plist', None)\n\n for arg in args:\n if isinstance(arg, EXE):\n self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))\n self.toc.extend(arg.dependencies)\n self.strip = arg.strip\n self.upx = arg.upx\n self.upx_exclude = arg.upx_exclude\n self.console = arg.console\n elif isinstance(arg, TOC):\n self.toc.extend(arg)\n # TOC doesn't have a strip or upx attribute, so there is no way for us to\n # tell which cache we should draw from.\n elif isinstance(arg, COLLECT):\n self.toc.extend(arg.toc)\n self.strip = arg.strip_binaries\n self.upx = arg.upx_binaries\n self.upx_exclude = arg.upx_exclude\n self.console = arg.console\n else:\n logger.info(\"unsupported entry %s\", arg.__class__.__name__)\n # Now, find values for app filepath (name), app name (appname), and name\n # of the actual executable (exename) from the first EXECUTABLE item in\n # toc, which might have come from a COLLECT too (not from an EXE).\n for inm, name, typ in self.toc:\n if typ == \"EXECUTABLE\":\n self.exename = name\n if self.name is None:\n self.appname = \"Mac%s\" % (os.path.splitext(inm)[0],)\n self.name = os.path.join(CONF['specpath'], self.appname + \".app\")\n else:\n self.name = os.path.join(CONF['specpath'], self.name)\n break\n self.__postinit__()\n\n _GUTS = (\n # BUNDLE always builds, just want the toc to be written out\n ('toc', None),\n )\n\n def _check_guts(self, data, last_build):\n # BUNDLE always needs to be executed, since it will clean the output\n # directory anyway to make sure there is no existing cruft accumulating\n return 1\n\n def assemble(self):\n if _check_path_overlap(self.name) and os.path.isdir(self.name):\n _rmtree(self.name)\n logger.info(\"Building BUNDLE %s\", self.tocbasename)\n\n # Create a minimal Mac bundle structure\n os.makedirs(os.path.join(self.name, \"Contents\", \"MacOS\"))\n os.makedirs(os.path.join(self.name, \"Contents\", \"Resources\"))\n os.makedirs(os.path.join(self.name, \"Contents\", \"Frameworks\"))\n\n # Copy icns icon to Resources directory.\n if os.path.exists(self.icon):\n shutil.copy(self.icon, os.path.join(self.name, 'Contents', 'Resources'))\n else:\n logger.warning(\"icon not found %s\", self.icon)\n\n # Key/values for a minimal Info.plist file\n info_plist_dict = {\"CFBundleDisplayName\": self.appname,\n \"CFBundleName\": self.appname,\n\n # Required by 'codesign' utility.\n # The value 
for CFBundleIdentifier is used as the default unique\n # name of your program for Code Signing purposes.\n # It even identifies the APP for access to restricted OS X areas\n # like Keychain.\n #\n # The identifier used for signing must be globally unique. The usal\n # form for this identifier is a hierarchical name in reverse DNS\n # notation, starting with the toplevel domain, followed by the\n # company name, followed by the department within the company, and\n # ending with the product name. Usually in the form:\n # com.mycompany.department.appname\n # Cli option --osx-bundle-identifier sets this value.\n \"CFBundleIdentifier\": self.bundle_identifier,\n\n # Fix for #156 - 'MacOS' must be in the name - not sure why\n \"CFBundleExecutable\": 'MacOS/%s' % os.path.basename(self.exename),\n \"CFBundleIconFile\": os.path.basename(self.icon),\n \"CFBundleInfoDictionaryVersion\": \"6.0\",\n \"CFBundlePackageType\": \"APPL\",\n \"CFBundleShortVersionString\": self.version,\n\n }\n\n # Set some default values.\n # But they still can be overwritten by the user.\n if self.console:\n # Setting EXE console=True implies LSBackgroundOnly=True.\n info_plist_dict['LSBackgroundOnly'] = True\n else:\n # Let's use high resolution by default.\n info_plist_dict['NSHighResolutionCapable'] = True\n\n # Merge info_plist settings from spec file\n if isinstance(self.info_plist, dict) and self.info_plist:\n info_plist_dict.update(self.info_plist)\n\n plist_filename = os.path.join(self.name, \"Contents\", \"Info.plist\")\n with open(plist_filename, \"wb\") as plist_fh:\n plistlib.dump(info_plist_dict, plist_fh)\n\n links = []\n toc = add_suffix_to_extensions(self.toc)\n for inm, fnm, typ in toc:\n # Copy files from cache. This ensures that are used files with relative\n # paths to dynamic library dependencies (@executable_path)\n base_path = inm.split('/', 1)[0]\n if typ in ('EXTENSION', 'BINARY'):\n fnm = checkCache(fnm, strip=self.strip, upx=self.upx,\n upx_exclude=self.upx_exclude, dist_nm=inm)\n # Add most data files to a list for symlinking later.\n if typ == 'DATA' and base_path not in ('base_library.zip', 'PySide2', 'PyQt5'):\n links.append((inm, fnm))\n else:\n tofnm = os.path.join(self.name, \"Contents\", \"MacOS\", inm)\n todir = os.path.dirname(tofnm)\n if not os.path.exists(todir):\n os.makedirs(todir)\n if os.path.isdir(fnm):\n # beacuse shutil.copy2() is the default copy function\n # for shutil.copytree, this will also copy file metadata\n shutil.copytree(fnm, tofnm)\n else:\n shutil.copy(fnm, tofnm)\n\n logger.info('moving BUNDLE data files to Resource directory')\n\n # Mac OS X Code Signing does not work when .app bundle contains\n # data files in dir ./Contents/MacOS.\n #\n # Put all data files in ./Resources and create symlinks in ./MacOS.\n bin_dir = os.path.join(self.name, 'Contents', 'MacOS')\n res_dir = os.path.join(self.name, 'Contents', 'Resources')\n for inm, fnm in links:\n tofnm = os.path.join(res_dir, inm)\n todir = os.path.dirname(tofnm)\n if not os.path.exists(todir):\n os.makedirs(todir)\n if os.path.isdir(fnm):\n # beacuse shutil.copy2() is the default copy function\n # for shutil.copytree, this will also copy file metadata\n shutil.copytree(fnm, tofnm)\n else:\n shutil.copy(fnm, tofnm)\n base_path = os.path.split(inm)[0]\n if base_path:\n if not os.path.exists(os.path.join(bin_dir, inm)):\n path = ''\n for part in iter(base_path.split(os.path.sep)):\n # Build path from previous path and the next part of the base path\n path = os.path.join(path, part)\n try:\n relative_source_path = 
os.path.relpath(os.path.join(res_dir, path),\n os.path.split(os.path.join(bin_dir, path))[0])\n dest_path = os.path.join(bin_dir, path)\n os.symlink(relative_source_path, dest_path)\n break\n except FileExistsError:\n pass\n if not os.path.exists(os.path.join(bin_dir, inm)):\n relative_source_path = os.path.relpath(os.path.join(res_dir, inm),\n os.path.split(os.path.join(bin_dir, inm))[0])\n dest_path = os.path.join(bin_dir, inm)\n os.symlink(relative_source_path, dest_path)\n else: # If path is empty, e.g., a top level file, try to just symlink the file\n os.symlink(os.path.relpath(os.path.join(res_dir, inm),\n os.path.split(os.path.join(bin_dir, inm))[0]),\n os.path.join(bin_dir, inm))\n", "path": "PyInstaller/building/osx.py"}]} | 3,664 | 202 |
gh_patches_debug_17879 | rasdani/github-patches | git_diff | conan-io__conan-center-index-14218 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] qwt/6.2.0: Unresolvable conflict between requires and build_requires
### Description
Using https://github.com/EstebanDugueperoux2/qwtExample example, today I get following error:
```
WARN: qt/5.15.5: requirement openssl/1.1.1s overridden by qwt/6.2.0 to openssl/1.1.1q
WARN: libmysqlclient/8.0.30: requirement openssl/1.1.1s overridden by qt/5.15.5 to openssl/1.1.1q
ERROR: Conflict in qt/5.15.5:
'qt/5.15.5' requires 'openssl/1.1.1s' while 'cmake/3.23.2' requires 'openssl/1.1.1q'.
To fix this conflict you need to override the package 'openssl' in your root package.
```
Even by overriding openssl dependency because it is a conflict between a require and a build_require.
See https://github.com/conan-io/conan/issues/4753.
### Package and Environment Details
* Package Name/Version: **qwt/6.2.0**
* Operating System+version: **conanio/gcc8**
* Compiler+version: **GCC 8**
* Docker image: **conanio/gcc8**
* Conan version: **conan 1.54.0**
* Python version: **Python 3.7.13**
### Conan profile
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=8
compiler.libcxx=libstdc++11
build_type=Release
# tests can be linked with gtest libraries only if using same build_type,
# see https://stackoverflow.com/questions/41408216/gtest-installed-with-conan-undefined-reference-to-testinginternalgetboolas/41409311#41409311
gtest:build_type=Debug
[options]
[build_requires]
[env]
### Steps to reproduce
docker run --rm -ti -v ${PWD}:/home/conan/project conanio/gcc8
cd project
export OPTIONS="--profile:build .conan/profiles/gcc8 --profile:host .conan/profiles/gcc8 -s qwt_example:build_type=Debug"
export PACKAGE_VERSION=`conan inspect . --raw version`
conan install . --install-folder=build --build missing $OPTIONS
### Logs
<details><summary>Click to expand log</summary>
```
WARN: qt/5.15.5: requirement openssl/1.1.1s overridden by qwt/6.2.0 to openssl/1.1.1q
WARN: libmysqlclient/8.0.30: requirement openssl/1.1.1s overridden by qt/5.15.5 to openssl/1.1.1q
ERROR: Conflict in qt/5.15.5:
'qt/5.15.5' requires 'openssl/1.1.1s' while 'cmake/3.23.2' requires 'openssl/1.1.1q'.
To fix this conflict you need to override the package 'openssl' in your root package.
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/qwt/all/conanfile.py`
Content:
```
1 from conan import ConanFile
2 from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout
3 from conan.tools.files import apply_conandata_patches, copy, get, rmdir
4 from conan.tools.build import cross_building
5 from conan.errors import ConanInvalidConfiguration
6 import os
7
8 required_conan_version = ">=1.50"
9
10 class QwtConan(ConanFile):
11 name = "qwt"
12 license = "LGPL-2.1-or-later"
13 url = "https://github.com/conan-io/conan-center-index"
14 homepage = "https://qwt.sourceforge.io/"
15 topics = ("conan", "archive", "compression")
16 description = (
17 "The Qwt library contains GUI Components and utility classes which are primarily useful for programs "
18 "with a technical background. Beside a framework for 2D plots it provides scales, sliders, dials, compasses, "
19 "thermometers, wheels and knobs to control or display values, arrays, or ranges of type double."
20 )
21 settings = "os", "compiler", "build_type", "arch"
22 options = {
23 "shared": [True, False],
24 "fPIC": [True, False],
25 "plot": [True, False],
26 "widgets": [True, False],
27 "svg": [True, False],
28 "opengl": [True, False],
29 "designer": [True, False],
30 "polar": [True, False],
31 "playground": [True, False],
32 "examples": [True, False],
33 "test": [True, False],
34 }
35 default_options = {
36 "shared": False,
37 "fPIC": True,
38 "plot": True,
39 "widgets": True,
40 "svg": False,
41 "opengl": True,
42 "designer": False,
43 "polar": True,
44 "playground": False,
45 "examples": False,
46 "test": False
47 }
48
49 tool_requires = (
50 "cmake/3.23.2",
51 "ninja/1.11.0"
52 )
53
54 def _patch_sources(self):
55 apply_conandata_patches(self)
56
57 def export_sources(self):
58 for p in self.conan_data.get("patches", {}).get(self.version, []):
59 copy(self, p["patch_file"], self.recipe_folder, self.export_sources_folder)
60
61
62 def requirements(self):
63 self.requires("qt/5.15.5")
64
65 def build_requirements(self):
66 if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio":
67 self.build_requires("jom/1.1.3")
68 self.tool_requires("qt/5.15.5")
69
70 def validate(self):
71 if hasattr(self, "settings_build") and cross_building(self, skip_x64_x86=True):
72 raise ConanInvalidConfiguration("Qwt recipe does not support cross-compilation yet")
73
74 def config_options(self):
75 if self.settings.os == "Windows":
76 del self.options.fPIC
77
78 def configure(self):
79 if self.options.shared:
80 del self.options.fPIC
81
82 def source(self):
83 get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
84
85 def layout(self):
86 cmake_layout(self)
87
88 def generate(self):
89 tc = CMakeToolchain(self, generator="Ninja")
90
91 tc.variables["QWT_DLL"] = "ON" if self.options.shared else "OFF"
92 tc.variables["QWT_STATIC "] = "ON" if not self.options.shared else "OFF"
93 tc.variables["QWT_PLOT"] = "ON" if self.options.plot else "OFF"
94 tc.variables["QWT_WIDGETS"] = "ON" if self.options.widgets else "OFF"
95 tc.variables["QWT_SVG"] = "ON" if self.options.svg else "OFF"
96 tc.variables["QWT_OPENGL"] = "ON" if self.options.opengl else "OFF"
97 tc.variables["QWT_DESIGNER"] = "ON" if self.options.designer else "OFF"
98 tc.variables["QWT_POLAR"] = "ON" if self.options.polar else "OFF"
99 tc.variables["QWT_BUILD_PLAYGROUND"] = "ON" if self.options.playground else "OFF"
100 tc.variables["QWT_BUILD_EXAMPLES"] = "ON" if self.options.examples else "OFF"
101 tc.variables["QWT_BUILD_TESTS"] = "ON" if self.options.test else "OFF"
102 tc.variables["QWT_FRAMEWORK"] = "OFF"
103
104 tc.generate()
105
106 deps = CMakeDeps(self)
107 deps.generate()
108
109 def build(self):
110 self._patch_sources()
111 cmake = CMake(self)
112 cmake.configure()
113 cmake.build()
114
115 if self.options.test:
116 cmake.test()
117
118 def package(self):
119 cmake = CMake(self)
120 cmake.install()
121 rmdir(self, f"{self.package_folder}/lib/pkgconfig")
122 rmdir(self, f"{self.package_folder}/lib/cmake")
123 self.copy("COPYING", src=self.folders.source, dst="licenses")
124
125 def package_info(self):
126 self.cpp_info.libs = ["qwt"]
127 self.env_info.QT_PLUGIN_PATH.append(os.path.join(self.package_folder, 'bin'))
128 self.env_info.QT_PLUGIN_PATH.append(os.path.join(self.package_folder, 'lib'))
129 self.cpp_info.defines = ['HAVE_QWT', 'QWT_DLL'] if self.options.shared else ['HAVE_QWT']
130 if not self.options.plot:
131 self.cpp_info.defines.append("NO_QWT_PLOT")
132 if not self.options.polar:
133 self.cpp_info.defines.append("NO_QWT_POLAR")
134 if not self.options.widgets:
135 self.cpp_info.defines.append("NO_QWT_WIDGETS")
136 if not self.options.opengl:
137 self.cpp_info.defines.append("NO_QWT_OPENGL")
138 if not self.options.svg:
139 self.cpp_info.defines.append("QWT_NO_SVG")
140
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/qwt/all/conanfile.py b/recipes/qwt/all/conanfile.py
--- a/recipes/qwt/all/conanfile.py
+++ b/recipes/qwt/all/conanfile.py
@@ -47,8 +47,8 @@
}
tool_requires = (
- "cmake/3.23.2",
- "ninja/1.11.0"
+ "cmake/3.24.2",
+ "ninja/1.11.1"
)
def _patch_sources(self):
@@ -60,12 +60,12 @@
def requirements(self):
- self.requires("qt/5.15.5")
+ self.requires("qt/5.15.7")
def build_requirements(self):
if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio":
self.build_requires("jom/1.1.3")
- self.tool_requires("qt/5.15.5")
+ self.tool_requires("qt/5.15.7")
def validate(self):
if hasattr(self, "settings_build") and cross_building(self, skip_x64_x86=True):
| {"golden_diff": "diff --git a/recipes/qwt/all/conanfile.py b/recipes/qwt/all/conanfile.py\n--- a/recipes/qwt/all/conanfile.py\n+++ b/recipes/qwt/all/conanfile.py\n@@ -47,8 +47,8 @@\n }\n \n tool_requires = (\n- \"cmake/3.23.2\",\n- \"ninja/1.11.0\"\n+ \"cmake/3.24.2\",\n+ \"ninja/1.11.1\"\n )\n \n def _patch_sources(self):\n@@ -60,12 +60,12 @@\n \n \n def requirements(self):\n- self.requires(\"qt/5.15.5\")\n+ self.requires(\"qt/5.15.7\")\n \n def build_requirements(self):\n if self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\":\n self.build_requires(\"jom/1.1.3\")\n- self.tool_requires(\"qt/5.15.5\") \n+ self.tool_requires(\"qt/5.15.7\") \n \n def validate(self):\n if hasattr(self, \"settings_build\") and cross_building(self, skip_x64_x86=True):\n", "issue": "[package] qwt/6.2.0: Unresolvable conflict between requires and build_requires\n### Description\n\nUsing https://github.com/EstebanDugueperoux2/qwtExample example, today I get following error:\r\n\r\n```\r\nWARN: qt/5.15.5: requirement openssl/1.1.1s overridden by qwt/6.2.0 to openssl/1.1.1q \r\nWARN: libmysqlclient/8.0.30: requirement openssl/1.1.1s overridden by qt/5.15.5 to openssl/1.1.1q \r\nERROR: Conflict in qt/5.15.5:\r\n 'qt/5.15.5' requires 'openssl/1.1.1s' while 'cmake/3.23.2' requires 'openssl/1.1.1q'.\r\n To fix this conflict you need to override the package 'openssl' in your root package.\r\n```\r\n\r\nEven by overriding openssl dependency because it is a conflict between a require and a build_require.\r\nSee https://github.com/conan-io/conan/issues/4753.\r\n\n\n### Package and Environment Details\n\n* Package Name/Version: **qwt/6.2.0**\r\n* Operating System+version: **conanio/gcc8**\r\n* Compiler+version: **GCC 8**\r\n* Docker image: **conanio/gcc8**\r\n* Conan version: **conan 1.54.0**\r\n* Python version: **Python 3.7.13**\r\n\n\n### Conan profile\n\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=8\r\ncompiler.libcxx=libstdc++11\r\nbuild_type=Release\r\n# tests can be linked with gtest libraries only if using same build_type,\r\n# see https://stackoverflow.com/questions/41408216/gtest-installed-with-conan-undefined-reference-to-testinginternalgetboolas/41409311#41409311\r\ngtest:build_type=Debug\r\n[options]\r\n[build_requires]\r\n[env]\r\n\n\n### Steps to reproduce\n\ndocker run --rm -ti -v ${PWD}:/home/conan/project conanio/gcc8\r\ncd project\r\n\r\nexport OPTIONS=\"--profile:build .conan/profiles/gcc8 --profile:host .conan/profiles/gcc8 -s qwt_example:build_type=Debug\"\r\nexport PACKAGE_VERSION=`conan inspect . --raw version`\r\nconan install . 
--install-folder=build --build missing $OPTIONS\n\n### Logs\n\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nWARN: qt/5.15.5: requirement openssl/1.1.1s overridden by qwt/6.2.0 to openssl/1.1.1q \r\nWARN: libmysqlclient/8.0.30: requirement openssl/1.1.1s overridden by qt/5.15.5 to openssl/1.1.1q \r\nERROR: Conflict in qt/5.15.5:\r\n 'qt/5.15.5' requires 'openssl/1.1.1s' while 'cmake/3.23.2' requires 'openssl/1.1.1q'.\r\n To fix this conflict you need to override the package 'openssl' in your root package.\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout\nfrom conan.tools.files import apply_conandata_patches, copy, get, rmdir\nfrom conan.tools.build import cross_building\nfrom conan.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.50\"\n\nclass QwtConan(ConanFile):\n name = \"qwt\"\n license = \"LGPL-2.1-or-later\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://qwt.sourceforge.io/\"\n topics = (\"conan\", \"archive\", \"compression\")\n description = (\n \"The Qwt library contains GUI Components and utility classes which are primarily useful for programs \"\n \"with a technical background. Beside a framework for 2D plots it provides scales, sliders, dials, compasses, \"\n \"thermometers, wheels and knobs to control or display values, arrays, or ranges of type double.\"\n )\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"plot\": [True, False],\n \"widgets\": [True, False],\n \"svg\": [True, False],\n \"opengl\": [True, False],\n \"designer\": [True, False],\n \"polar\": [True, False],\n \"playground\": [True, False],\n \"examples\": [True, False],\n \"test\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"plot\": True,\n \"widgets\": True,\n \"svg\": False,\n \"opengl\": True,\n \"designer\": False,\n \"polar\": True,\n \"playground\": False,\n \"examples\": False,\n \"test\": False\n }\n\n tool_requires = (\n \"cmake/3.23.2\",\n \"ninja/1.11.0\"\n )\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n\n def export_sources(self):\n for p in self.conan_data.get(\"patches\", {}).get(self.version, []):\n copy(self, p[\"patch_file\"], self.recipe_folder, self.export_sources_folder)\n\n\n def requirements(self):\n self.requires(\"qt/5.15.5\")\n\n def build_requirements(self):\n if self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\":\n self.build_requires(\"jom/1.1.3\")\n self.tool_requires(\"qt/5.15.5\") \n\n def validate(self):\n if hasattr(self, \"settings_build\") and cross_building(self, skip_x64_x86=True):\n raise ConanInvalidConfiguration(\"Qwt recipe does not support cross-compilation yet\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def layout(self):\n cmake_layout(self)\n\n def generate(self):\n tc = CMakeToolchain(self, generator=\"Ninja\")\n\n tc.variables[\"QWT_DLL\"] = \"ON\" if self.options.shared else \"OFF\"\n tc.variables[\"QWT_STATIC \"] = \"ON\" if not self.options.shared else \"OFF\"\n tc.variables[\"QWT_PLOT\"] = \"ON\" if self.options.plot else \"OFF\"\n 
tc.variables[\"QWT_WIDGETS\"] = \"ON\" if self.options.widgets else \"OFF\"\n tc.variables[\"QWT_SVG\"] = \"ON\" if self.options.svg else \"OFF\"\n tc.variables[\"QWT_OPENGL\"] = \"ON\" if self.options.opengl else \"OFF\"\n tc.variables[\"QWT_DESIGNER\"] = \"ON\" if self.options.designer else \"OFF\"\n tc.variables[\"QWT_POLAR\"] = \"ON\" if self.options.polar else \"OFF\"\n tc.variables[\"QWT_BUILD_PLAYGROUND\"] = \"ON\" if self.options.playground else \"OFF\"\n tc.variables[\"QWT_BUILD_EXAMPLES\"] = \"ON\" if self.options.examples else \"OFF\"\n tc.variables[\"QWT_BUILD_TESTS\"] = \"ON\" if self.options.test else \"OFF\"\n tc.variables[\"QWT_FRAMEWORK\"] = \"OFF\"\n\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n if self.options.test:\n cmake.test()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n rmdir(self, f\"{self.package_folder}/lib/pkgconfig\") \n rmdir(self, f\"{self.package_folder}/lib/cmake\")\n self.copy(\"COPYING\", src=self.folders.source, dst=\"licenses\")\n\n def package_info(self):\n self.cpp_info.libs = [\"qwt\"]\n self.env_info.QT_PLUGIN_PATH.append(os.path.join(self.package_folder, 'bin'))\n self.env_info.QT_PLUGIN_PATH.append(os.path.join(self.package_folder, 'lib'))\n self.cpp_info.defines = ['HAVE_QWT', 'QWT_DLL'] if self.options.shared else ['HAVE_QWT']\n if not self.options.plot:\n self.cpp_info.defines.append(\"NO_QWT_PLOT\")\n if not self.options.polar:\n self.cpp_info.defines.append(\"NO_QWT_POLAR\")\n if not self.options.widgets:\n self.cpp_info.defines.append(\"NO_QWT_WIDGETS\")\n if not self.options.opengl:\n self.cpp_info.defines.append(\"NO_QWT_OPENGL\")\n if not self.options.svg:\n self.cpp_info.defines.append(\"QWT_NO_SVG\")\n\n", "path": "recipes/qwt/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout\nfrom conan.tools.files import apply_conandata_patches, copy, get, rmdir\nfrom conan.tools.build import cross_building\nfrom conan.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.50\"\n\nclass QwtConan(ConanFile):\n name = \"qwt\"\n license = \"LGPL-2.1-or-later\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://qwt.sourceforge.io/\"\n topics = (\"conan\", \"archive\", \"compression\")\n description = (\n \"The Qwt library contains GUI Components and utility classes which are primarily useful for programs \"\n \"with a technical background. 
Beside a framework for 2D plots it provides scales, sliders, dials, compasses, \"\n \"thermometers, wheels and knobs to control or display values, arrays, or ranges of type double.\"\n )\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"plot\": [True, False],\n \"widgets\": [True, False],\n \"svg\": [True, False],\n \"opengl\": [True, False],\n \"designer\": [True, False],\n \"polar\": [True, False],\n \"playground\": [True, False],\n \"examples\": [True, False],\n \"test\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"plot\": True,\n \"widgets\": True,\n \"svg\": False,\n \"opengl\": True,\n \"designer\": False,\n \"polar\": True,\n \"playground\": False,\n \"examples\": False,\n \"test\": False\n }\n\n tool_requires = (\n \"cmake/3.24.2\",\n \"ninja/1.11.1\"\n )\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n\n def export_sources(self):\n for p in self.conan_data.get(\"patches\", {}).get(self.version, []):\n copy(self, p[\"patch_file\"], self.recipe_folder, self.export_sources_folder)\n\n\n def requirements(self):\n self.requires(\"qt/5.15.7\")\n\n def build_requirements(self):\n if self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\":\n self.build_requires(\"jom/1.1.3\")\n self.tool_requires(\"qt/5.15.7\") \n\n def validate(self):\n if hasattr(self, \"settings_build\") and cross_building(self, skip_x64_x86=True):\n raise ConanInvalidConfiguration(\"Qwt recipe does not support cross-compilation yet\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def layout(self):\n cmake_layout(self)\n\n def generate(self):\n tc = CMakeToolchain(self, generator=\"Ninja\")\n\n tc.variables[\"QWT_DLL\"] = \"ON\" if self.options.shared else \"OFF\"\n tc.variables[\"QWT_STATIC \"] = \"ON\" if not self.options.shared else \"OFF\"\n tc.variables[\"QWT_PLOT\"] = \"ON\" if self.options.plot else \"OFF\"\n tc.variables[\"QWT_WIDGETS\"] = \"ON\" if self.options.widgets else \"OFF\"\n tc.variables[\"QWT_SVG\"] = \"ON\" if self.options.svg else \"OFF\"\n tc.variables[\"QWT_OPENGL\"] = \"ON\" if self.options.opengl else \"OFF\"\n tc.variables[\"QWT_DESIGNER\"] = \"ON\" if self.options.designer else \"OFF\"\n tc.variables[\"QWT_POLAR\"] = \"ON\" if self.options.polar else \"OFF\"\n tc.variables[\"QWT_BUILD_PLAYGROUND\"] = \"ON\" if self.options.playground else \"OFF\"\n tc.variables[\"QWT_BUILD_EXAMPLES\"] = \"ON\" if self.options.examples else \"OFF\"\n tc.variables[\"QWT_BUILD_TESTS\"] = \"ON\" if self.options.test else \"OFF\"\n tc.variables[\"QWT_FRAMEWORK\"] = \"OFF\"\n\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n if self.options.test:\n cmake.test()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n rmdir(self, f\"{self.package_folder}/lib/pkgconfig\") \n rmdir(self, f\"{self.package_folder}/lib/cmake\")\n self.copy(\"COPYING\", src=self.folders.source, dst=\"licenses\")\n\n def package_info(self):\n self.cpp_info.libs = [\"qwt\"]\n self.env_info.QT_PLUGIN_PATH.append(os.path.join(self.package_folder, 'bin'))\n 
self.env_info.QT_PLUGIN_PATH.append(os.path.join(self.package_folder, 'lib'))\n self.cpp_info.defines = ['HAVE_QWT', 'QWT_DLL'] if self.options.shared else ['HAVE_QWT']\n if not self.options.plot:\n self.cpp_info.defines.append(\"NO_QWT_PLOT\")\n if not self.options.polar:\n self.cpp_info.defines.append(\"NO_QWT_POLAR\")\n if not self.options.widgets:\n self.cpp_info.defines.append(\"NO_QWT_WIDGETS\")\n if not self.options.opengl:\n self.cpp_info.defines.append(\"NO_QWT_OPENGL\")\n if not self.options.svg:\n self.cpp_info.defines.append(\"QWT_NO_SVG\")\n\n", "path": "recipes/qwt/all/conanfile.py"}]} | 2,643 | 277 |
gh_patches_debug_4877 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2042 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
updating progress doesn't work
Warning : i see this bug as unlinked with instance, because i saw it on my instance and on bookwyrm.social.
When i go to the book view, i have a block with two progress, but without any print of the pages i've read.

If i want to edit the progress to add pages number read, i have the choice between two dates and the number input. So i fill in the form. with only page number filled.

Then i'm going back to the book view, but with 3 entries instead of two. page number hasn't been saved/printed and it added a new progress instead of editing it.

One problem is you can't save a progress without a comment, but even with that fix, i think progress should indicate the pages read and the pages left to be read. a more "graphic' layout could be very cool ;)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/forms/forms.py`
Content:
```
1 """ using django model forms """
2 from django import forms
3 from django.forms import widgets
4 from django.utils.translation import gettext_lazy as _
5
6 from bookwyrm import models
7 from bookwyrm.models.user import FeedFilterChoices
8 from .custom_form import CustomForm
9
10
11 # pylint: disable=missing-class-docstring
12 class FeedStatusTypesForm(CustomForm):
13 class Meta:
14 model = models.User
15 fields = ["feed_status_types"]
16 help_texts = {f: None for f in fields}
17 widgets = {
18 "feed_status_types": widgets.CheckboxSelectMultiple(
19 choices=FeedFilterChoices,
20 ),
21 }
22
23
24 class ImportForm(forms.Form):
25 csv_file = forms.FileField()
26
27
28 class ShelfForm(CustomForm):
29 class Meta:
30 model = models.Shelf
31 fields = ["user", "name", "privacy", "description"]
32
33
34 class GoalForm(CustomForm):
35 class Meta:
36 model = models.AnnualGoal
37 fields = ["user", "year", "goal", "privacy"]
38
39
40 class ReportForm(CustomForm):
41 class Meta:
42 model = models.Report
43 fields = ["user", "reporter", "status", "links", "note"]
44
45
46 class ReadThroughForm(CustomForm):
47 def clean(self):
48 """make sure the email isn't in use by a registered user"""
49 cleaned_data = super().clean()
50 start_date = cleaned_data.get("start_date")
51 finish_date = cleaned_data.get("finish_date")
52 if start_date and finish_date and start_date > finish_date:
53 self.add_error(
54 "finish_date", _("Reading finish date cannot be before start date.")
55 )
56
57 class Meta:
58 model = models.ReadThrough
59 fields = ["user", "book", "start_date", "finish_date"]
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/forms/forms.py b/bookwyrm/forms/forms.py
--- a/bookwyrm/forms/forms.py
+++ b/bookwyrm/forms/forms.py
@@ -45,7 +45,7 @@
class ReadThroughForm(CustomForm):
def clean(self):
- """make sure the email isn't in use by a registered user"""
+ """don't let readthroughs end before they start"""
cleaned_data = super().clean()
start_date = cleaned_data.get("start_date")
finish_date = cleaned_data.get("finish_date")
| {"golden_diff": "diff --git a/bookwyrm/forms/forms.py b/bookwyrm/forms/forms.py\n--- a/bookwyrm/forms/forms.py\n+++ b/bookwyrm/forms/forms.py\n@@ -45,7 +45,7 @@\n \n class ReadThroughForm(CustomForm):\n def clean(self):\n- \"\"\"make sure the email isn't in use by a registered user\"\"\"\n+ \"\"\"don't let readthroughs end before they start\"\"\"\n cleaned_data = super().clean()\n start_date = cleaned_data.get(\"start_date\")\n finish_date = cleaned_data.get(\"finish_date\")\n", "issue": "updating progress doesn't work\nWarning : i see this bug as unlinked with instance, because i saw it on my instance and on bookwyrm.social. \r\n\r\nWhen i go to the book view, i have a block with two progress, but without any print of the pages i've read. \r\n\r\n\r\nIf i want to edit the progress to add pages number read, i have the choice between two dates and the number input. So i fill in the form. with only page number filled.\r\n\r\n\r\nThen i'm going back to the book view, but with 3 entries instead of two. page number hasn't been saved/printed and it added a new progress instead of editing it.\r\n\r\n\r\nOne problem is you can't save a progress without a comment, but even with that fix, i think progress should indicate the pages read and the pages left to be read. a more \"graphic' layout could be very cool ;)\n", "before_files": [{"content": "\"\"\" using django model forms \"\"\"\nfrom django import forms\nfrom django.forms import widgets\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\nfrom bookwyrm.models.user import FeedFilterChoices\nfrom .custom_form import CustomForm\n\n\n# pylint: disable=missing-class-docstring\nclass FeedStatusTypesForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"feed_status_types\"]\n help_texts = {f: None for f in fields}\n widgets = {\n \"feed_status_types\": widgets.CheckboxSelectMultiple(\n choices=FeedFilterChoices,\n ),\n }\n\n\nclass ImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\nclass ShelfForm(CustomForm):\n class Meta:\n model = models.Shelf\n fields = [\"user\", \"name\", \"privacy\", \"description\"]\n\n\nclass GoalForm(CustomForm):\n class Meta:\n model = models.AnnualGoal\n fields = [\"user\", \"year\", \"goal\", \"privacy\"]\n\n\nclass ReportForm(CustomForm):\n class Meta:\n model = models.Report\n fields = [\"user\", \"reporter\", \"status\", \"links\", \"note\"]\n\n\nclass ReadThroughForm(CustomForm):\n def clean(self):\n \"\"\"make sure the email isn't in use by a registered user\"\"\"\n cleaned_data = super().clean()\n start_date = cleaned_data.get(\"start_date\")\n finish_date = cleaned_data.get(\"finish_date\")\n if start_date and finish_date and start_date > finish_date:\n self.add_error(\n \"finish_date\", _(\"Reading finish date cannot be before start date.\")\n )\n\n class Meta:\n model = models.ReadThrough\n fields = [\"user\", \"book\", \"start_date\", \"finish_date\"]\n", "path": "bookwyrm/forms/forms.py"}], "after_files": [{"content": "\"\"\" using django model forms \"\"\"\nfrom django import forms\nfrom django.forms import widgets\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\nfrom bookwyrm.models.user import FeedFilterChoices\nfrom .custom_form import CustomForm\n\n\n# pylint: disable=missing-class-docstring\nclass FeedStatusTypesForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"feed_status_types\"]\n help_texts = {f: None for f in fields}\n widgets = {\n \"feed_status_types\": 
widgets.CheckboxSelectMultiple(\n choices=FeedFilterChoices,\n ),\n }\n\n\nclass ImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\nclass ShelfForm(CustomForm):\n class Meta:\n model = models.Shelf\n fields = [\"user\", \"name\", \"privacy\", \"description\"]\n\n\nclass GoalForm(CustomForm):\n class Meta:\n model = models.AnnualGoal\n fields = [\"user\", \"year\", \"goal\", \"privacy\"]\n\n\nclass ReportForm(CustomForm):\n class Meta:\n model = models.Report\n fields = [\"user\", \"reporter\", \"status\", \"links\", \"note\"]\n\n\nclass ReadThroughForm(CustomForm):\n def clean(self):\n \"\"\"don't let readthroughs end before they start\"\"\"\n cleaned_data = super().clean()\n start_date = cleaned_data.get(\"start_date\")\n finish_date = cleaned_data.get(\"finish_date\")\n if start_date and finish_date and start_date > finish_date:\n self.add_error(\n \"finish_date\", _(\"Reading finish date cannot be before start date.\")\n )\n\n class Meta:\n model = models.ReadThrough\n fields = [\"user\", \"book\", \"start_date\", \"finish_date\"]\n", "path": "bookwyrm/forms/forms.py"}]} | 1,129 | 122 |