| Column | Type | Values |
|---|---|---|
| problem_id | string | 18 to 22 characters |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | 13 to 58 characters |
| prompt | string | 1.1k to 10.2k characters |
| golden_diff | string | 151 to 4.94k characters |
| verification_info | string | 582 to 21k characters |
| num_tokens | int64 | 271 to 2.05k |
| num_tokens_diff | int64 | 47 to 1.02k |
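
The records below can also be read programmatically. The sketch that follows is a minimal example, not an authoritative recipe: it assumes this dump corresponds to the `rasdani/github-patches` dataset on the Hugging Face Hub (the repo id shown in each row's `source` field), that the default split is named `train`, and that `verification_info` is stored as a JSON string; adjust those details if the dataset is published differently.

```python
from datasets import load_dataset
import json

# Assumptions: the Hub repo id matches the `source` column and the split is "train".
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])     # e.g. "gh_patches_debug_858"
print(row["in_source_id"])   # upstream issue id, e.g. "pypi__warehouse-3292"
print(row["prompt"][:300])   # issue text, buggy files, and patching instructions
print(row["golden_diff"])    # reference patch in `git diff` format

# Assumption: verification_info is a JSON string holding the issue plus
# before/after file contents used to verify a candidate patch.
verification = json.loads(row["verification_info"])
print(sorted(verification.keys()))
```

The two integer columns at the end of each row, `num_tokens` and `num_tokens_diff`, appear to be token counts for the prompt and the golden diff respectively.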
problem_id: gh_patches_debug_858 | source: rasdani/github-patches | task_type: git_diff | in_source_id: pypi__warehouse-3292

We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Warehouse file order differs from legacy PyPI file list
Tonight, while load testing of pypi.org was ongoing, we saw some failures in automated systems that use `--require-hashes` with `pip install`, as ordering on the package file list page changed.
The specific package we saw break was `pandas` at version `0.12.0`. We had a single hash for `pandas-0.12.0.tar.gz`. A few of our hosts were served from the legacy PyPI service, which succeeded as normal. The Warehouse endpoint, however, failed, since `pandas-0.12.0.zip` now preceded `pandas-0.12.0.tar.gz` in the file list.
At the moment, you can see that https://pypi.org/simple/pandas/ and https://pypi.python.org/simple/pandas/ differ by searching for `pandas-0.12.0.tar.gz` and `pandas-0.12.0.zip` and comparing the position.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/legacy/api/simple.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13
14 from packaging.version import parse
15 from pyramid.httpexceptions import HTTPMovedPermanently
16 from pyramid.view import view_config
17 from sqlalchemy import func
18 from sqlalchemy.orm import joinedload
19
20 from warehouse.cache.http import cache_control
21 from warehouse.cache.origin import origin_cache
22 from warehouse.packaging.models import JournalEntry, File, Project, Release
23
24
25 @view_config(
26 route_name="legacy.api.simple.index",
27 renderer="legacy/api/simple/index.html",
28 decorator=[
29 cache_control(10 * 60), # 10 minutes
30 origin_cache(
31 1 * 24 * 60 * 60, # 1 day
32 stale_while_revalidate=5 * 60, # 5 minutes
33 stale_if_error=1 * 24 * 60 * 60, # 1 day
34 ),
35 ],
36 )
37 def simple_index(request):
38 # Get the latest serial number
39 serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0
40 request.response.headers["X-PyPI-Last-Serial"] = str(serial)
41
42 # Fetch the name and normalized name for all of our projects
43 projects = (
44 request.db.query(Project.name, Project.normalized_name)
45 .order_by(Project.normalized_name)
46 .all()
47 )
48
49 return {"projects": projects}
50
51
52 @view_config(
53 route_name="legacy.api.simple.detail",
54 renderer="legacy/api/simple/detail.html",
55 decorator=[
56 cache_control(10 * 60), # 10 minutes
57 origin_cache(
58 1 * 24 * 60 * 60, # 1 day
59 stale_while_revalidate=5 * 60, # 5 minutes
60 stale_if_error=1 * 24 * 60 * 60, # 1 day
61 ),
62 ],
63 )
64 def simple_detail(project, request):
65 # TODO: Handle files which are not hosted on PyPI
66
67 # Make sure that we're using the normalized version of the URL.
68 if (project.normalized_name !=
69 request.matchdict.get("name", project.normalized_name)):
70 return HTTPMovedPermanently(
71 request.current_route_path(name=project.normalized_name),
72 )
73
74 # Get the latest serial number for this project.
75 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
76
77 # Get all of the files for this project.
78 files = sorted(
79 request.db.query(File)
80 .options(joinedload(File.release))
81 .filter(
82 File.name == project.name,
83 File.version.in_(
84 request.db.query(Release)
85 .filter(Release.project == project)
86 .with_entities(Release.version)
87 )
88 )
89 .all(),
90 key=lambda f: (parse(f.version), f.packagetype)
91 )
92
93 return {"project": project, "files": files}
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py
--- a/warehouse/legacy/api/simple.py
+++ b/warehouse/legacy/api/simple.py
@@ -87,7 +87,7 @@
)
)
.all(),
- key=lambda f: (parse(f.version), f.packagetype)
+ key=lambda f: (parse(f.version), f.filename)
)
return {"project": project, "files": files}
| {"golden_diff": "diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py\n--- a/warehouse/legacy/api/simple.py\n+++ b/warehouse/legacy/api/simple.py\n@@ -87,7 +87,7 @@\n )\n )\n .all(),\n- key=lambda f: (parse(f.version), f.packagetype)\n+ key=lambda f: (parse(f.version), f.filename)\n )\n \n return {\"project\": project, \"files\": files}\n", "issue": "Warehouse file order differs from legacy PyPI file list\nTonight, while load testing of pypi.org was ongoing, we saw some failures in automated systems that use `--require-hashes` with `pip install`, as ordering on the package file list page changed.\r\n\r\nThe specific package we saw break was `pandas` at version `0.12.0`. We had a single hash for `pandas-0.12.0.tar.gz`. A few of our hosts were served from the legacy PyPI service, which succeeded as normal. The Warehouse endpoint, however, failed, since `pandas-0.12.0.zip` now preceded `pandas-0.12.0.tar.gz` in the file list.\r\n\r\nAt the moment, you can see that https://pypi.org/simple/pandas/ and https://pypi.python.org/simple/pandas/ differ by searching for `pandas-0.12.0.tar.gz` and `pandas-0.12.0.zip` and comparing the position.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom packaging.version import parse\nfrom pyramid.httpexceptions import HTTPMovedPermanently\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import JournalEntry, File, Project, Release\n\n\n@view_config(\n route_name=\"legacy.api.simple.index\",\n renderer=\"legacy/api/simple/index.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_index(request):\n # Get the latest serial number\n serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(serial)\n\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\"projects\": projects}\n\n\n@view_config(\n route_name=\"legacy.api.simple.detail\",\n renderer=\"legacy/api/simple/detail.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_detail(project, request):\n # TODO: Handle files which are not hosted on PyPI\n\n # Make sure that we're using the normalized version of the URL.\n if (project.normalized_name !=\n request.matchdict.get(\"name\", project.normalized_name)):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.normalized_name),\n )\n\n # Get the latest serial number 
for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the files for this project.\n files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n File.name == project.name,\n File.version.in_(\n request.db.query(Release)\n .filter(Release.project == project)\n .with_entities(Release.version)\n )\n )\n .all(),\n key=lambda f: (parse(f.version), f.packagetype)\n )\n\n return {\"project\": project, \"files\": files}\n", "path": "warehouse/legacy/api/simple.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom packaging.version import parse\nfrom pyramid.httpexceptions import HTTPMovedPermanently\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import JournalEntry, File, Project, Release\n\n\n@view_config(\n route_name=\"legacy.api.simple.index\",\n renderer=\"legacy/api/simple/index.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_index(request):\n # Get the latest serial number\n serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(serial)\n\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\"projects\": projects}\n\n\n@view_config(\n route_name=\"legacy.api.simple.detail\",\n renderer=\"legacy/api/simple/detail.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_detail(project, request):\n # TODO: Handle files which are not hosted on PyPI\n\n # Make sure that we're using the normalized version of the URL.\n if (project.normalized_name !=\n request.matchdict.get(\"name\", project.normalized_name)):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.normalized_name),\n )\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the files for this project.\n files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n File.name == project.name,\n File.version.in_(\n request.db.query(Release)\n .filter(Release.project == project)\n .with_entities(Release.version)\n )\n )\n .all(),\n key=lambda f: (parse(f.version), f.filename)\n )\n\n return {\"project\": project, \"files\": files}\n", "path": "warehouse/legacy/api/simple.py"}]} | 1,403 | 105 |
problem_id: gh_patches_debug_34797 | source: rasdani/github-patches | task_type: git_diff | in_source_id: sql-machine-learning__elasticdl-657

We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
precommit flake8 check is missing files
elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py is missed by the checker.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/data/codec/__init__.py`
Content:
```
1 from elasticdl.python.data.codec.bytes_codec import BytesCodec
2 from elasticdl.python.data.codec.tf_example_codec import TFExampleCodec
3
4 __all__ = [BytesCodec, TFExampleCodec]
5
```
Path: `elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py`
Content:
```
1 import argparse
2 import os
3
4 from pyspark import SparkContext
5 from pyspark import TaskContext
6
7 import numpy as np
8 from elasticdl.python.elasticdl.common.model_helper import load_user_model
9 from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \
10 convert_numpy_to_recordio
11
12
13 def process_data(
14 feature_label_columns,
15 single_file_preparation_func,
16 output_dir,
17 records_per_file,
18 codec_type,
19 ):
20 def _process_data(file_list):
21 ctx = TaskContext()
22 feature_list = []
23 label_list = []
24 for file in file_list:
25 feature_label_tuple = single_file_preparation_func(file)
26 assert len(feature_label_tuple) == 2
27 feature_list.append(feature_label_tuple[0])
28 label_list.append(feature_label_tuple[1])
29 convert_numpy_to_recordio(
30 output_dir,
31 np.array(feature_list),
32 np.array(label_list),
33 feature_label_columns,
34 records_per_file,
35 codec_type,
36 str(ctx.partitionId()),
37 )
38 return file_list
39 return _process_data
40
41
42 def main():
43 parser = argparse.ArgumentParser(
44 description="Spark job to convert training data to RecordIO format"
45 )
46 parser.add_argument(
47 "--training_data_dir",
48 help="Directory that contains training data and will be traversed \
49 recursively",
50 required=True,
51 )
52 parser.add_argument(
53 "--output_dir",
54 help="Directory of output RecordIO data",
55 required=True,
56 )
57 parser.add_argument(
58 "--model_file",
59 required=True,
60 help="User-defined model file which data processing logic is in",
61 )
62 parser.add_argument(
63 "--records_per_file", default=1024, type=int, help="Record per file"
64 )
65 parser.add_argument(
66 "--codec_type",
67 default="tf_example",
68 choices=["tf_example", "bytes"],
69 help="Type of codec(tf_example or bytes)",
70 )
71 parser.add_argument(
72 "--num_workers",
73 default=2,
74 type=int,
75 help="Number of workers of Spark job",
76 )
77
78 args = parser.parse_args()
79
80 # Get training data files recursively from training_data_dir
81 file_list = []
82 for dir_name, subdir_list, files in os.walk(args.training_data_dir):
83 for fname in files:
84 if not fname.startswith('.'):
85 file_list.append(os.path.join(dir_name, fname))
86
87 # Load user-defined model
88 model_module = load_user_model(args.model_file)
89
90 if not os.path.exists(args.output_dir):
91 os.makedirs(args.output_dir)
92
93 # Start the Spark job
94 sc = SparkContext()
95 rdd = sc.parallelize(file_list, args.num_workers)
96 rdd.mapPartitions(
97 process_data(
98 model_module.feature_columns() + model_module.label_columns(),
99 model_module.prepare_data_for_a_single_file,
100 args.output_dir,
101 args.records_per_file,
102 args.codec_type,
103 )
104 ).collect()
105
106
107 if __name__ == "__main__":
108 main()
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/data/codec/__init__.py b/elasticdl/python/data/codec/__init__.py
--- a/elasticdl/python/data/codec/__init__.py
+++ b/elasticdl/python/data/codec/__init__.py
@@ -1,4 +1,4 @@
from elasticdl.python.data.codec.bytes_codec import BytesCodec
from elasticdl.python.data.codec.tf_example_codec import TFExampleCodec
-__all__ = [BytesCodec, TFExampleCodec]
+__all__ = ["BytesCodec", "TFExampleCodec"]
diff --git a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py
--- a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py
+++ b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py
@@ -6,8 +6,9 @@
import numpy as np
from elasticdl.python.elasticdl.common.model_helper import load_user_model
-from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \
- convert_numpy_to_recordio
+from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import (
+ convert_numpy_to_recordio,
+)
def process_data(
@@ -36,6 +37,7 @@
str(ctx.partitionId()),
)
return file_list
+
return _process_data
@@ -50,9 +52,7 @@
required=True,
)
parser.add_argument(
- "--output_dir",
- help="Directory of output RecordIO data",
- required=True,
+ "--output_dir", help="Directory of output RecordIO data", required=True
)
parser.add_argument(
"--model_file",
@@ -81,12 +81,12 @@
file_list = []
for dir_name, subdir_list, files in os.walk(args.training_data_dir):
for fname in files:
- if not fname.startswith('.'):
+ if not fname.startswith("."):
file_list.append(os.path.join(dir_name, fname))
# Load user-defined model
model_module = load_user_model(args.model_file)
-
+
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
| {"golden_diff": "diff --git a/elasticdl/python/data/codec/__init__.py b/elasticdl/python/data/codec/__init__.py\n--- a/elasticdl/python/data/codec/__init__.py\n+++ b/elasticdl/python/data/codec/__init__.py\n@@ -1,4 +1,4 @@\n from elasticdl.python.data.codec.bytes_codec import BytesCodec\n from elasticdl.python.data.codec.tf_example_codec import TFExampleCodec\n \n-__all__ = [BytesCodec, TFExampleCodec]\n+__all__ = [\"BytesCodec\", \"TFExampleCodec\"]\ndiff --git a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py\n--- a/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py\n+++ b/elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py\n@@ -6,8 +6,9 @@\n \n import numpy as np\n from elasticdl.python.elasticdl.common.model_helper import load_user_model\n-from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \\\n- convert_numpy_to_recordio\n+from elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import (\n+ convert_numpy_to_recordio,\n+)\n \n \n def process_data(\n@@ -36,6 +37,7 @@\n str(ctx.partitionId()),\n )\n return file_list\n+\n return _process_data\n \n \n@@ -50,9 +52,7 @@\n required=True,\n )\n parser.add_argument(\n- \"--output_dir\",\n- help=\"Directory of output RecordIO data\",\n- required=True,\n+ \"--output_dir\", help=\"Directory of output RecordIO data\", required=True\n )\n parser.add_argument(\n \"--model_file\",\n@@ -81,12 +81,12 @@\n file_list = []\n for dir_name, subdir_list, files in os.walk(args.training_data_dir):\n for fname in files:\n- if not fname.startswith('.'):\n+ if not fname.startswith(\".\"):\n file_list.append(os.path.join(dir_name, fname))\n \n # Load user-defined model\n model_module = load_user_model(args.model_file)\n- \n+\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n", "issue": "precommit flake8 check is missing files\nelasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py is missed by the checker.\n", "before_files": [{"content": "from elasticdl.python.data.codec.bytes_codec import BytesCodec\nfrom elasticdl.python.data.codec.tf_example_codec import TFExampleCodec\n\n__all__ = [BytesCodec, TFExampleCodec]\n", "path": "elasticdl/python/data/codec/__init__.py"}, {"content": "import argparse\nimport os\n\nfrom pyspark import SparkContext\nfrom pyspark import TaskContext\n\nimport numpy as np\nfrom elasticdl.python.elasticdl.common.model_helper import load_user_model\nfrom elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import \\\n convert_numpy_to_recordio\n\n\ndef process_data(\n feature_label_columns,\n single_file_preparation_func,\n output_dir,\n records_per_file,\n codec_type,\n):\n def _process_data(file_list):\n ctx = TaskContext()\n feature_list = []\n label_list = []\n for file in file_list:\n feature_label_tuple = single_file_preparation_func(file)\n assert len(feature_label_tuple) == 2\n feature_list.append(feature_label_tuple[0])\n label_list.append(feature_label_tuple[1])\n convert_numpy_to_recordio(\n output_dir,\n np.array(feature_list),\n np.array(label_list),\n feature_label_columns,\n records_per_file,\n codec_type,\n str(ctx.partitionId()),\n )\n return file_list\n return _process_data\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Spark job to convert training data to RecordIO format\"\n )\n parser.add_argument(\n 
\"--training_data_dir\",\n help=\"Directory that contains training data and will be traversed \\\n recursively\",\n required=True,\n )\n parser.add_argument(\n \"--output_dir\",\n help=\"Directory of output RecordIO data\",\n required=True,\n )\n parser.add_argument(\n \"--model_file\",\n required=True,\n help=\"User-defined model file which data processing logic is in\",\n )\n parser.add_argument(\n \"--records_per_file\", default=1024, type=int, help=\"Record per file\"\n )\n parser.add_argument(\n \"--codec_type\",\n default=\"tf_example\",\n choices=[\"tf_example\", \"bytes\"],\n help=\"Type of codec(tf_example or bytes)\",\n )\n parser.add_argument(\n \"--num_workers\",\n default=2,\n type=int,\n help=\"Number of workers of Spark job\",\n )\n\n args = parser.parse_args()\n\n # Get training data files recursively from training_data_dir\n file_list = []\n for dir_name, subdir_list, files in os.walk(args.training_data_dir):\n for fname in files:\n if not fname.startswith('.'):\n file_list.append(os.path.join(dir_name, fname))\n\n # Load user-defined model\n model_module = load_user_model(args.model_file)\n \n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # Start the Spark job\n sc = SparkContext()\n rdd = sc.parallelize(file_list, args.num_workers)\n rdd.mapPartitions(\n process_data(\n model_module.feature_columns() + model_module.label_columns(),\n model_module.prepare_data_for_a_single_file,\n args.output_dir,\n args.records_per_file,\n args.codec_type,\n )\n ).collect()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py"}], "after_files": [{"content": "from elasticdl.python.data.codec.bytes_codec import BytesCodec\nfrom elasticdl.python.data.codec.tf_example_codec import TFExampleCodec\n\n__all__ = [\"BytesCodec\", \"TFExampleCodec\"]\n", "path": "elasticdl/python/data/codec/__init__.py"}, {"content": "import argparse\nimport os\n\nfrom pyspark import SparkContext\nfrom pyspark import TaskContext\n\nimport numpy as np\nfrom elasticdl.python.elasticdl.common.model_helper import load_user_model\nfrom elasticdl.python.data.recordio_gen.convert_numpy_to_recordio import (\n convert_numpy_to_recordio,\n)\n\n\ndef process_data(\n feature_label_columns,\n single_file_preparation_func,\n output_dir,\n records_per_file,\n codec_type,\n):\n def _process_data(file_list):\n ctx = TaskContext()\n feature_list = []\n label_list = []\n for file in file_list:\n feature_label_tuple = single_file_preparation_func(file)\n assert len(feature_label_tuple) == 2\n feature_list.append(feature_label_tuple[0])\n label_list.append(feature_label_tuple[1])\n convert_numpy_to_recordio(\n output_dir,\n np.array(feature_list),\n np.array(label_list),\n feature_label_columns,\n records_per_file,\n codec_type,\n str(ctx.partitionId()),\n )\n return file_list\n\n return _process_data\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Spark job to convert training data to RecordIO format\"\n )\n parser.add_argument(\n \"--training_data_dir\",\n help=\"Directory that contains training data and will be traversed \\\n recursively\",\n required=True,\n )\n parser.add_argument(\n \"--output_dir\", help=\"Directory of output RecordIO data\", required=True\n )\n parser.add_argument(\n \"--model_file\",\n required=True,\n help=\"User-defined model file which data processing logic is in\",\n )\n parser.add_argument(\n \"--records_per_file\", default=1024, type=int, help=\"Record per file\"\n 
)\n parser.add_argument(\n \"--codec_type\",\n default=\"tf_example\",\n choices=[\"tf_example\", \"bytes\"],\n help=\"Type of codec(tf_example or bytes)\",\n )\n parser.add_argument(\n \"--num_workers\",\n default=2,\n type=int,\n help=\"Number of workers of Spark job\",\n )\n\n args = parser.parse_args()\n\n # Get training data files recursively from training_data_dir\n file_list = []\n for dir_name, subdir_list, files in os.walk(args.training_data_dir):\n for fname in files:\n if not fname.startswith(\".\"):\n file_list.append(os.path.join(dir_name, fname))\n\n # Load user-defined model\n model_module = load_user_model(args.model_file)\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # Start the Spark job\n sc = SparkContext()\n rdd = sc.parallelize(file_list, args.num_workers)\n rdd.mapPartitions(\n process_data(\n model_module.feature_columns() + model_module.label_columns(),\n model_module.prepare_data_for_a_single_file,\n args.output_dir,\n args.records_per_file,\n args.codec_type,\n )\n ).collect()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/data/recordio_gen/sample_pyspark_recordio_gen/spark_gen_recordio.py"}]} | 1,238 | 521 |
problem_id: gh_patches_debug_16462 | source: rasdani/github-patches | task_type: git_diff | in_source_id: napari__napari-6268

We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG `changing` event in `EventedDict` not emitted
## 🐛 Bug
The `changing` event in [`EventedDict`](https://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L9) is created but not documented or emitted:
https://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L75-L77
It seems not to be missed, should it be added?
<!-- A clear and concise description of what the bug is. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/utils/events/containers/_evented_dict.py`
Content:
```
1 """MutableMapping that emits events when altered."""
2 from typing import Mapping, Optional, Sequence, Type, Union
3
4 from napari.utils.events.containers._dict import _K, _T, TypedMutableMapping
5 from napari.utils.events.event import EmitterGroup, Event
6 from napari.utils.events.types import SupportsEvents
7
8
9 class EventedDict(TypedMutableMapping[_K, _T]):
10 """Mutable dictionary that emits events when altered.
11
12 This class is designed to behave exactly like builtin ``dict``, but
13 will emit events before and after all mutations (addition, removal, and
14 changing).
15
16 Parameters
17 ----------
18 data : Mapping, optional
19 Dictionary to initialize the class with.
20 basetype : type of sequence of types, optional
21 Type of the element in the dictionary.
22
23 Events
24 ------
25 changed (key: K, old_value: T, value: T)
26 emitted when item at ``key`` is changed from ``old_value`` to ``value``
27 adding (key: K)
28 emitted before an item is added to the dictionary with ``key``
29 added (key: K, value: T)
30 emitted after ``value`` was added to the dictionary with ``key``
31 removing (key: K)
32 emitted before ``key`` is removed from the dictionary
33 removed (key: K, value: T)
34 emitted after ``key`` was removed from the dictionary
35 updated (key, K, value: T)
36 emitted after ``value`` of ``key`` was changed. Only implemented by
37 subclasses to give them an option to trigger some update after ``value``
38 was changed and this class did not register it. This can be useful if
39 the ``basetype`` is not an evented object.
40 """
41
42 events: EmitterGroup
43
44 def __init__(
45 self,
46 data: Optional[Mapping[_K, _T]] = None,
47 basetype: Union[Type[_T], Sequence[Type[_T]]] = (),
48 ) -> None:
49 _events = {
50 "changing": None,
51 "changed": None,
52 "adding": None,
53 "added": None,
54 "removing": None,
55 "removed": None,
56 "updated": None,
57 }
58 # For inheritance: If the mro already provides an EmitterGroup, add...
59 if hasattr(self, "events") and isinstance(self.events, EmitterGroup):
60 self.events.add(**_events)
61 else:
62 # otherwise create a new one
63 self.events = EmitterGroup(
64 source=self, auto_connect=False, **_events
65 )
66 super().__init__(data, basetype)
67
68 def __setitem__(self, key: _K, value: _T):
69 old = self._dict.get(key)
70 if value is old or value == old:
71 return
72 if old is None:
73 self.events.adding(key=key)
74 super().__setitem__(key, value)
75 self.events.added(key=key, value=value)
76 self._connect_child_emitters(value)
77 else:
78 super().__setitem__(key, value)
79 self.events.changed(key=key, old_value=old, value=value)
80
81 def __delitem__(self, key: _K):
82 self.events.removing(key=key)
83 self._disconnect_child_emitters(self[key])
84 item = self._dict.pop(key)
85 self.events.removed(key=key, value=item)
86
87 def _reemit_child_event(self, event: Event):
88 """An item in the dict emitted an event. Re-emit with key"""
89 if not hasattr(event, "key"):
90 event.key = self.key(event.source)
91
92 # re-emit with this object's EventEmitter
93 self.events(event)
94
95 def _disconnect_child_emitters(self, child: _T):
96 """Disconnect all events from the child from the re-emitter."""
97 if isinstance(child, SupportsEvents):
98 child.events.disconnect(self._reemit_child_event)
99
100 def _connect_child_emitters(self, child: _T):
101 """Connect all events from the child to be re-emitted."""
102 if isinstance(child, SupportsEvents):
103 # make sure the event source has been set on the child
104 if child.events.source is None:
105 child.events.source = child
106 child.events.connect(self._reemit_child_event)
107
108 def key(self, value: _T):
109 """Return first instance of value."""
110 for k, v in self._dict.items():
111 if v is value or v == value:
112 return k
113 return None
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/utils/events/containers/_evented_dict.py b/napari/utils/events/containers/_evented_dict.py
--- a/napari/utils/events/containers/_evented_dict.py
+++ b/napari/utils/events/containers/_evented_dict.py
@@ -22,6 +22,8 @@
Events
------
+ changing (key: K)
+ emitted before an item at ``key`` is changed
changed (key: K, old_value: T, value: T)
emitted when item at ``key`` is changed from ``old_value`` to ``value``
adding (key: K)
@@ -75,6 +77,7 @@
self.events.added(key=key, value=value)
self._connect_child_emitters(value)
else:
+ self.events.changing(key=key)
super().__setitem__(key, value)
self.events.changed(key=key, old_value=old, value=value)
| {"golden_diff": "diff --git a/napari/utils/events/containers/_evented_dict.py b/napari/utils/events/containers/_evented_dict.py\n--- a/napari/utils/events/containers/_evented_dict.py\n+++ b/napari/utils/events/containers/_evented_dict.py\n@@ -22,6 +22,8 @@\n \n Events\n ------\n+ changing (key: K)\n+ emitted before an item at ``key`` is changed\n changed (key: K, old_value: T, value: T)\n emitted when item at ``key`` is changed from ``old_value`` to ``value``\n adding (key: K)\n@@ -75,6 +77,7 @@\n self.events.added(key=key, value=value)\n self._connect_child_emitters(value)\n else:\n+ self.events.changing(key=key)\n super().__setitem__(key, value)\n self.events.changed(key=key, old_value=old, value=value)\n", "issue": "BUG `changing` event in `EventedDict` not emitted\n## \ud83d\udc1b Bug\r\n\r\nThe `changing` event in [`EventedDict`](https://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L9) is created but not documented or emitted:\r\n\r\nhttps://github.com/napari/napari/blob/8c307022cc557692409f5e8bc031f1dcde4c374a/napari/utils/events/containers/_evented_dict.py#L75-L77\r\n\r\nIt seems not to be missed, should it be added?\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n\n", "before_files": [{"content": "\"\"\"MutableMapping that emits events when altered.\"\"\"\nfrom typing import Mapping, Optional, Sequence, Type, Union\n\nfrom napari.utils.events.containers._dict import _K, _T, TypedMutableMapping\nfrom napari.utils.events.event import EmitterGroup, Event\nfrom napari.utils.events.types import SupportsEvents\n\n\nclass EventedDict(TypedMutableMapping[_K, _T]):\n \"\"\"Mutable dictionary that emits events when altered.\n\n This class is designed to behave exactly like builtin ``dict``, but\n will emit events before and after all mutations (addition, removal, and\n changing).\n\n Parameters\n ----------\n data : Mapping, optional\n Dictionary to initialize the class with.\n basetype : type of sequence of types, optional\n Type of the element in the dictionary.\n\n Events\n ------\n changed (key: K, old_value: T, value: T)\n emitted when item at ``key`` is changed from ``old_value`` to ``value``\n adding (key: K)\n emitted before an item is added to the dictionary with ``key``\n added (key: K, value: T)\n emitted after ``value`` was added to the dictionary with ``key``\n removing (key: K)\n emitted before ``key`` is removed from the dictionary\n removed (key: K, value: T)\n emitted after ``key`` was removed from the dictionary\n updated (key, K, value: T)\n emitted after ``value`` of ``key`` was changed. Only implemented by\n subclasses to give them an option to trigger some update after ``value``\n was changed and this class did not register it. 
This can be useful if\n the ``basetype`` is not an evented object.\n \"\"\"\n\n events: EmitterGroup\n\n def __init__(\n self,\n data: Optional[Mapping[_K, _T]] = None,\n basetype: Union[Type[_T], Sequence[Type[_T]]] = (),\n ) -> None:\n _events = {\n \"changing\": None,\n \"changed\": None,\n \"adding\": None,\n \"added\": None,\n \"removing\": None,\n \"removed\": None,\n \"updated\": None,\n }\n # For inheritance: If the mro already provides an EmitterGroup, add...\n if hasattr(self, \"events\") and isinstance(self.events, EmitterGroup):\n self.events.add(**_events)\n else:\n # otherwise create a new one\n self.events = EmitterGroup(\n source=self, auto_connect=False, **_events\n )\n super().__init__(data, basetype)\n\n def __setitem__(self, key: _K, value: _T):\n old = self._dict.get(key)\n if value is old or value == old:\n return\n if old is None:\n self.events.adding(key=key)\n super().__setitem__(key, value)\n self.events.added(key=key, value=value)\n self._connect_child_emitters(value)\n else:\n super().__setitem__(key, value)\n self.events.changed(key=key, old_value=old, value=value)\n\n def __delitem__(self, key: _K):\n self.events.removing(key=key)\n self._disconnect_child_emitters(self[key])\n item = self._dict.pop(key)\n self.events.removed(key=key, value=item)\n\n def _reemit_child_event(self, event: Event):\n \"\"\"An item in the dict emitted an event. Re-emit with key\"\"\"\n if not hasattr(event, \"key\"):\n event.key = self.key(event.source)\n\n # re-emit with this object's EventEmitter\n self.events(event)\n\n def _disconnect_child_emitters(self, child: _T):\n \"\"\"Disconnect all events from the child from the re-emitter.\"\"\"\n if isinstance(child, SupportsEvents):\n child.events.disconnect(self._reemit_child_event)\n\n def _connect_child_emitters(self, child: _T):\n \"\"\"Connect all events from the child to be re-emitted.\"\"\"\n if isinstance(child, SupportsEvents):\n # make sure the event source has been set on the child\n if child.events.source is None:\n child.events.source = child\n child.events.connect(self._reemit_child_event)\n\n def key(self, value: _T):\n \"\"\"Return first instance of value.\"\"\"\n for k, v in self._dict.items():\n if v is value or v == value:\n return k\n return None\n", "path": "napari/utils/events/containers/_evented_dict.py"}], "after_files": [{"content": "\"\"\"MutableMapping that emits events when altered.\"\"\"\nfrom typing import Mapping, Optional, Sequence, Type, Union\n\nfrom napari.utils.events.containers._dict import _K, _T, TypedMutableMapping\nfrom napari.utils.events.event import EmitterGroup, Event\nfrom napari.utils.events.types import SupportsEvents\n\n\nclass EventedDict(TypedMutableMapping[_K, _T]):\n \"\"\"Mutable dictionary that emits events when altered.\n\n This class is designed to behave exactly like builtin ``dict``, but\n will emit events before and after all mutations (addition, removal, and\n changing).\n\n Parameters\n ----------\n data : Mapping, optional\n Dictionary to initialize the class with.\n basetype : type of sequence of types, optional\n Type of the element in the dictionary.\n\n Events\n ------\n changing (key: K)\n emitted before an item at ``key`` is changed\n changed (key: K, old_value: T, value: T)\n emitted when item at ``key`` is changed from ``old_value`` to ``value``\n adding (key: K)\n emitted before an item is added to the dictionary with ``key``\n added (key: K, value: T)\n emitted after ``value`` was added to the dictionary with ``key``\n removing (key: K)\n emitted before ``key`` 
is removed from the dictionary\n removed (key: K, value: T)\n emitted after ``key`` was removed from the dictionary\n updated (key, K, value: T)\n emitted after ``value`` of ``key`` was changed. Only implemented by\n subclasses to give them an option to trigger some update after ``value``\n was changed and this class did not register it. This can be useful if\n the ``basetype`` is not an evented object.\n \"\"\"\n\n events: EmitterGroup\n\n def __init__(\n self,\n data: Optional[Mapping[_K, _T]] = None,\n basetype: Union[Type[_T], Sequence[Type[_T]]] = (),\n ) -> None:\n _events = {\n \"changing\": None,\n \"changed\": None,\n \"adding\": None,\n \"added\": None,\n \"removing\": None,\n \"removed\": None,\n \"updated\": None,\n }\n # For inheritance: If the mro already provides an EmitterGroup, add...\n if hasattr(self, \"events\") and isinstance(self.events, EmitterGroup):\n self.events.add(**_events)\n else:\n # otherwise create a new one\n self.events = EmitterGroup(\n source=self, auto_connect=False, **_events\n )\n super().__init__(data, basetype)\n\n def __setitem__(self, key: _K, value: _T):\n old = self._dict.get(key)\n if value is old or value == old:\n return\n if old is None:\n self.events.adding(key=key)\n super().__setitem__(key, value)\n self.events.added(key=key, value=value)\n self._connect_child_emitters(value)\n else:\n self.events.changing(key=key)\n super().__setitem__(key, value)\n self.events.changed(key=key, old_value=old, value=value)\n\n def __delitem__(self, key: _K):\n self.events.removing(key=key)\n self._disconnect_child_emitters(self[key])\n item = self._dict.pop(key)\n self.events.removed(key=key, value=item)\n\n def _reemit_child_event(self, event: Event):\n \"\"\"An item in the dict emitted an event. Re-emit with key\"\"\"\n if not hasattr(event, \"key\"):\n event.key = self.key(event.source)\n\n # re-emit with this object's EventEmitter\n self.events(event)\n\n def _disconnect_child_emitters(self, child: _T):\n \"\"\"Disconnect all events from the child from the re-emitter.\"\"\"\n if isinstance(child, SupportsEvents):\n child.events.disconnect(self._reemit_child_event)\n\n def _connect_child_emitters(self, child: _T):\n \"\"\"Connect all events from the child to be re-emitted.\"\"\"\n if isinstance(child, SupportsEvents):\n # make sure the event source has been set on the child\n if child.events.source is None:\n child.events.source = child\n child.events.connect(self._reemit_child_event)\n\n def key(self, value: _T):\n \"\"\"Return first instance of value.\"\"\"\n for k, v in self._dict.items():\n if v is value or v == value:\n return k\n return None\n", "path": "napari/utils/events/containers/_evented_dict.py"}]} | 1,672 | 211 |
problem_id: gh_patches_debug_11353 | source: rasdani/github-patches | task_type: git_diff | in_source_id: iterative__dvc-951

We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Testing against Python 3.6 and 3.7
https://github.com/pyinstaller/pyinstaller#main-advantages says that pyinstaller supports these versions of Python but https://github.com/iterative/dvc/blob/master/.travis.yml#L9 says the opposite.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/remote/base.py`
Content:
```
1 import os
2 import re
3
4 from dvc.config import Config
5 from dvc.logger import Logger
6 from dvc.exceptions import DvcException
7
8
9 STATUS_OK = 1
10 STATUS_NEW = 3
11 STATUS_DELETED = 4
12
13
14 STATUS_MAP = {
15 # (local_exists, remote_exists)
16 (True, True): STATUS_OK,
17 (False, False): STATUS_OK,
18 (True, False): STATUS_NEW,
19 (False, True): STATUS_DELETED,
20 }
21
22
23 class DataCloudError(DvcException):
24 """ Data Cloud exception """
25 def __init__(self, msg):
26 super(DataCloudError, self).__init__('Data sync error: {}'.format(msg))
27
28
29 class RemoteBase(object):
30 REGEX = None
31 REQUIRES = {}
32
33 def __init__(self, project, config):
34 pass
35
36 @classmethod
37 def supported(cls, config):
38 url = config[Config.SECTION_REMOTE_URL]
39 url_ok = cls.match(url)
40 deps_ok = all(cls.REQUIRES.values())
41 if url_ok and not deps_ok:
42 missing = [k for k, v in cls.REQUIRES.items() if v is None]
43 msg = "URL \'{}\' is supported but requires " \
44 "these missing dependencies: {}"
45 Logger.warn(msg.format(url, str(missing)))
46 return url_ok and deps_ok
47
48 @classmethod
49 def match(cls, url):
50 return re.match(cls.REGEX, url)
51
52 def group(self, name):
53 m = self.match(self.url)
54 if not m:
55 return None
56 return m.group(name)
57
58 @staticmethod
59 def tmp_file(fname):
60 """ Temporary name for a partial download """
61 # FIXME probably better use uuid()
62 return fname + '.part'
63
64 def save_info(self, path_info):
65 raise NotImplementedError
66
67 def save(self, path_info):
68 raise NotImplementedError
69
70 def checkout(self, path_info, checksum_info):
71 raise NotImplementedError
72
73 def download(self, from_infos, to_infos, no_progress_bar=False, name=None):
74 raise NotImplementedError
75
76 def upload(self, from_infos, to_infos, path_info, name=None):
77 raise NotImplementedError
78
79 def remove(self, path_info):
80 raise NotImplementedError
81
82 def move(self, path_info):
83 raise NotImplementedError
84
85 def _makedirs(self, fname):
86 dname = os.path.dirname(fname)
87 try:
88 os.makedirs(dname)
89 except OSError as e:
90 if e.errno != os.errno.EEXIST:
91 raise
92
93 def md5s_to_path_infos(self, md5s):
94 raise NotImplementedError
95
96 def exists(self, path_infos):
97 raise NotImplementedError
98
99 @classmethod
100 def _verify_path_args(cls, from_infos, to_infos, names=None):
101 assert isinstance(from_infos, list)
102 assert isinstance(to_infos, list)
103 assert len(from_infos) == len(to_infos)
104
105 if not names:
106 names = len(to_infos) * [None]
107 else:
108 assert isinstance(names, list)
109 assert len(names) == len(to_infos)
110
111 return names
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/remote/base.py b/dvc/remote/base.py
--- a/dvc/remote/base.py
+++ b/dvc/remote/base.py
@@ -1,5 +1,6 @@
import os
import re
+import errno
from dvc.config import Config
from dvc.logger import Logger
@@ -84,10 +85,11 @@
def _makedirs(self, fname):
dname = os.path.dirname(fname)
+
try:
os.makedirs(dname)
except OSError as e:
- if e.errno != os.errno.EEXIST:
+ if e.errno != errno.EEXIST:
raise
def md5s_to_path_infos(self, md5s):
| {"golden_diff": "diff --git a/dvc/remote/base.py b/dvc/remote/base.py\n--- a/dvc/remote/base.py\n+++ b/dvc/remote/base.py\n@@ -1,5 +1,6 @@\n import os\n import re\n+import errno\n \n from dvc.config import Config\n from dvc.logger import Logger\n@@ -84,10 +85,11 @@\n \n def _makedirs(self, fname):\n dname = os.path.dirname(fname)\n+\n try:\n os.makedirs(dname)\n except OSError as e:\n- if e.errno != os.errno.EEXIST:\n+ if e.errno != errno.EEXIST:\n raise\n \n def md5s_to_path_infos(self, md5s):\n", "issue": "Testing against Python 3.6 and 3.7\nhttps://github.com/pyinstaller/pyinstaller#main-advantages says that pyinstaller supports these versions of Python but https://github.com/iterative/dvc/blob/master/.travis.yml#L9 says the opposite.\n", "before_files": [{"content": "import os\nimport re\n\nfrom dvc.config import Config\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\n\n\nSTATUS_OK = 1\nSTATUS_NEW = 3\nSTATUS_DELETED = 4\n\n\nSTATUS_MAP = {\n # (local_exists, remote_exists)\n (True, True): STATUS_OK,\n (False, False): STATUS_OK,\n (True, False): STATUS_NEW,\n (False, True): STATUS_DELETED,\n}\n\n\nclass DataCloudError(DvcException):\n \"\"\" Data Cloud exception \"\"\"\n def __init__(self, msg):\n super(DataCloudError, self).__init__('Data sync error: {}'.format(msg))\n\n\nclass RemoteBase(object):\n REGEX = None\n REQUIRES = {}\n\n def __init__(self, project, config):\n pass\n\n @classmethod\n def supported(cls, config):\n url = config[Config.SECTION_REMOTE_URL]\n url_ok = cls.match(url)\n deps_ok = all(cls.REQUIRES.values())\n if url_ok and not deps_ok:\n missing = [k for k, v in cls.REQUIRES.items() if v is None]\n msg = \"URL \\'{}\\' is supported but requires \" \\\n \"these missing dependencies: {}\"\n Logger.warn(msg.format(url, str(missing)))\n return url_ok and deps_ok\n\n @classmethod\n def match(cls, url):\n return re.match(cls.REGEX, url)\n\n def group(self, name):\n m = self.match(self.url)\n if not m:\n return None\n return m.group(name)\n\n @staticmethod\n def tmp_file(fname):\n \"\"\" Temporary name for a partial download \"\"\"\n # FIXME probably better use uuid()\n return fname + '.part'\n\n def save_info(self, path_info):\n raise NotImplementedError\n\n def save(self, path_info):\n raise NotImplementedError\n\n def checkout(self, path_info, checksum_info):\n raise NotImplementedError\n\n def download(self, from_infos, to_infos, no_progress_bar=False, name=None):\n raise NotImplementedError\n\n def upload(self, from_infos, to_infos, path_info, name=None):\n raise NotImplementedError\n\n def remove(self, path_info):\n raise NotImplementedError\n\n def move(self, path_info):\n raise NotImplementedError\n\n def _makedirs(self, fname):\n dname = os.path.dirname(fname)\n try:\n os.makedirs(dname)\n except OSError as e:\n if e.errno != os.errno.EEXIST:\n raise\n\n def md5s_to_path_infos(self, md5s):\n raise NotImplementedError\n\n def exists(self, path_infos):\n raise NotImplementedError\n\n @classmethod\n def _verify_path_args(cls, from_infos, to_infos, names=None):\n assert isinstance(from_infos, list)\n assert isinstance(to_infos, list)\n assert len(from_infos) == len(to_infos)\n\n if not names:\n names = len(to_infos) * [None]\n else:\n assert isinstance(names, list)\n assert len(names) == len(to_infos)\n\n return names\n", "path": "dvc/remote/base.py"}], "after_files": [{"content": "import os\nimport re\nimport errno\n\nfrom dvc.config import Config\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\n\n\nSTATUS_OK = 
1\nSTATUS_NEW = 3\nSTATUS_DELETED = 4\n\n\nSTATUS_MAP = {\n # (local_exists, remote_exists)\n (True, True): STATUS_OK,\n (False, False): STATUS_OK,\n (True, False): STATUS_NEW,\n (False, True): STATUS_DELETED,\n}\n\n\nclass DataCloudError(DvcException):\n \"\"\" Data Cloud exception \"\"\"\n def __init__(self, msg):\n super(DataCloudError, self).__init__('Data sync error: {}'.format(msg))\n\n\nclass RemoteBase(object):\n REGEX = None\n REQUIRES = {}\n\n def __init__(self, project, config):\n pass\n\n @classmethod\n def supported(cls, config):\n url = config[Config.SECTION_REMOTE_URL]\n url_ok = cls.match(url)\n deps_ok = all(cls.REQUIRES.values())\n if url_ok and not deps_ok:\n missing = [k for k, v in cls.REQUIRES.items() if v is None]\n msg = \"URL \\'{}\\' is supported but requires \" \\\n \"these missing dependencies: {}\"\n Logger.warn(msg.format(url, str(missing)))\n return url_ok and deps_ok\n\n @classmethod\n def match(cls, url):\n return re.match(cls.REGEX, url)\n\n def group(self, name):\n m = self.match(self.url)\n if not m:\n return None\n return m.group(name)\n\n @staticmethod\n def tmp_file(fname):\n \"\"\" Temporary name for a partial download \"\"\"\n # FIXME probably better use uuid()\n return fname + '.part'\n\n def save_info(self, path_info):\n raise NotImplementedError\n\n def save(self, path_info):\n raise NotImplementedError\n\n def checkout(self, path_info, checksum_info):\n raise NotImplementedError\n\n def download(self, from_infos, to_infos, no_progress_bar=False, name=None):\n raise NotImplementedError\n\n def upload(self, from_infos, to_infos, path_info, name=None):\n raise NotImplementedError\n\n def remove(self, path_info):\n raise NotImplementedError\n\n def move(self, path_info):\n raise NotImplementedError\n\n def _makedirs(self, fname):\n dname = os.path.dirname(fname)\n\n try:\n os.makedirs(dname)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n def md5s_to_path_infos(self, md5s):\n raise NotImplementedError\n\n def exists(self, path_infos):\n raise NotImplementedError\n\n @classmethod\n def _verify_path_args(cls, from_infos, to_infos, names=None):\n assert isinstance(from_infos, list)\n assert isinstance(to_infos, list)\n assert len(from_infos) == len(to_infos)\n\n if not names:\n names = len(to_infos) * [None]\n else:\n assert isinstance(names, list)\n assert len(names) == len(to_infos)\n\n return names\n", "path": "dvc/remote/base.py"}]} | 1,210 | 160 |
problem_id: gh_patches_debug_4392 | source: rasdani/github-patches | task_type: git_diff | in_source_id: encode__starlette-1940

We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
middleware causes exceptions to not be raised/handled silently
### Discussed in https://github.com/encode/starlette/discussions/1976
<div type='discussions-op-text'>
<sup>Originally posted by **fraser-langton** December 6, 2022</sup>
From [issue in FastAPI](https://github.com/tiangolo/fastapi/issues/5173) - issue was said to be from starlette
Was initially noticed in FastAPI 0.74.0, was fixed after 0.79.0 but has since regressed (FastAPI 0.88.0 it isn't working)
When exceptions are raised on a subapp, the exceptions are not propagated all the way to see in console
In the example code
/info raises an exception and the full stacktrace is seen in console
/private/info does not raise the exception and only `INFO: 127.0.0.1:56308 - "GET /info HTTP/1.1" 500 Internal Server Error` is shown in console
```python
import uvicorn
from fastapi import FastAPI
from starlette.middleware.base import BaseHTTPMiddleware
app = FastAPI()
@app.get("/info")
def info():
# raises Exception as expected, the traceback is seen in console
raise Exception
private_api = FastAPI()
@private_api.get("/info")
def info():
# exception is handled silently, no traceback is seen in console
raise Exception
app.mount("/private", private_api)
class Middleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
return await call_next(request)
app.add_middleware(Middleware) # when this is removed, the exceptions are raised for all routes
if __name__ == '__main__':
uvicorn.run(app, port=8000)
```
</div>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/base.py`
Content:
```
1 import typing
2
3 import anyio
4
5 from starlette.requests import Request
6 from starlette.responses import Response, StreamingResponse
7 from starlette.types import ASGIApp, Message, Receive, Scope, Send
8
9 RequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]
10 DispatchFunction = typing.Callable[
11 [Request, RequestResponseEndpoint], typing.Awaitable[Response]
12 ]
13 T = typing.TypeVar("T")
14
15
16 class BaseHTTPMiddleware:
17 def __init__(
18 self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None
19 ) -> None:
20 self.app = app
21 self.dispatch_func = self.dispatch if dispatch is None else dispatch
22
23 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
24 if scope["type"] != "http":
25 await self.app(scope, receive, send)
26 return
27
28 response_sent = anyio.Event()
29
30 async def call_next(request: Request) -> Response:
31 app_exc: typing.Optional[Exception] = None
32 send_stream, recv_stream = anyio.create_memory_object_stream()
33
34 async def receive_or_disconnect() -> Message:
35 if response_sent.is_set():
36 return {"type": "http.disconnect"}
37
38 async with anyio.create_task_group() as task_group:
39
40 async def wrap(func: typing.Callable[[], typing.Awaitable[T]]) -> T:
41 result = await func()
42 task_group.cancel_scope.cancel()
43 return result
44
45 task_group.start_soon(wrap, response_sent.wait)
46 message = await wrap(request.receive)
47
48 if response_sent.is_set():
49 return {"type": "http.disconnect"}
50
51 return message
52
53 async def close_recv_stream_on_response_sent() -> None:
54 await response_sent.wait()
55 recv_stream.close()
56
57 async def send_no_error(message: Message) -> None:
58 try:
59 await send_stream.send(message)
60 except anyio.BrokenResourceError:
61 # recv_stream has been closed, i.e. response_sent has been set.
62 return
63
64 async def coro() -> None:
65 nonlocal app_exc
66
67 async with send_stream:
68 try:
69 await self.app(scope, receive_or_disconnect, send_no_error)
70 except Exception as exc:
71 app_exc = exc
72
73 task_group.start_soon(close_recv_stream_on_response_sent)
74 task_group.start_soon(coro)
75
76 try:
77 message = await recv_stream.receive()
78 except anyio.EndOfStream:
79 if app_exc is not None:
80 raise app_exc
81 raise RuntimeError("No response returned.")
82
83 assert message["type"] == "http.response.start"
84
85 async def body_stream() -> typing.AsyncGenerator[bytes, None]:
86 async with recv_stream:
87 async for message in recv_stream:
88 assert message["type"] == "http.response.body"
89 body = message.get("body", b"")
90 if body:
91 yield body
92 if not message.get("more_body", False):
93 break
94
95 if app_exc is not None:
96 raise app_exc
97
98 response = StreamingResponse(
99 status_code=message["status"], content=body_stream()
100 )
101 response.raw_headers = message["headers"]
102 return response
103
104 async with anyio.create_task_group() as task_group:
105 request = Request(scope, receive=receive)
106 response = await self.dispatch_func(request, call_next)
107 await response(scope, receive, send)
108 response_sent.set()
109
110 async def dispatch(
111 self, request: Request, call_next: RequestResponseEndpoint
112 ) -> Response:
113 raise NotImplementedError() # pragma: no cover
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py
--- a/starlette/middleware/base.py
+++ b/starlette/middleware/base.py
@@ -89,8 +89,6 @@
body = message.get("body", b"")
if body:
yield body
- if not message.get("more_body", False):
- break
if app_exc is not None:
raise app_exc
| {"golden_diff": "diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py\n--- a/starlette/middleware/base.py\n+++ b/starlette/middleware/base.py\n@@ -89,8 +89,6 @@\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n- if not message.get(\"more_body\", False):\n- break\n \n if app_exc is not None:\n raise app_exc\n", "issue": "middleware causes exceptions to not be raised/handled silently\n### Discussed in https://github.com/encode/starlette/discussions/1976\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **fraser-langton** December 6, 2022</sup>\r\nFrom [issue in FastAPI](https://github.com/tiangolo/fastapi/issues/5173) - issue was said to be from starlette\r\n\r\nWas initially noticed in FastAPI 0.74.0, was fixed after 0.79.0 but has since regressed (FastAPI 0.88.0 it isn't working)\r\n\r\nWhen exceptions are raised on a subapp, the exceptions are not propagated all the way to see in console\r\nIn the example code\r\n/info raises an exception and the full stacktrace is seen in console\r\n/private/info does not raise the exception and only `INFO: 127.0.0.1:56308 - \"GET /info HTTP/1.1\" 500 Internal Server Error` is shown in console\r\n\r\n```python\r\nimport uvicorn\r\nfrom fastapi import FastAPI\r\nfrom starlette.middleware.base import BaseHTTPMiddleware\r\n\r\n\r\napp = FastAPI()\r\n\r\n\r\[email protected](\"/info\")\r\ndef info():\r\n # raises Exception as expected, the traceback is seen in console\r\n raise Exception\r\n\r\n\r\nprivate_api = FastAPI()\r\n\r\n\r\n@private_api.get(\"/info\")\r\ndef info():\r\n # exception is handled silently, no traceback is seen in console\r\n raise Exception\r\n\r\n\r\napp.mount(\"/private\", private_api)\r\n\r\n\r\nclass Middleware(BaseHTTPMiddleware):\r\n\r\n async def dispatch(self, request, call_next):\r\n return await call_next(request)\r\n\r\n\r\napp.add_middleware(Middleware) # when this is removed, the exceptions are raised for all routes\r\n\r\n\r\nif __name__ == '__main__':\r\n uvicorn.run(app, port=8000)\r\n```\r\n</div>\n", "before_files": [{"content": "import typing\n\nimport anyio\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response, StreamingResponse\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\nRequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]\nDispatchFunction = typing.Callable[\n [Request, RequestResponseEndpoint], typing.Awaitable[Response]\n]\nT = typing.TypeVar(\"T\")\n\n\nclass BaseHTTPMiddleware:\n def __init__(\n self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None\n ) -> None:\n self.app = app\n self.dispatch_func = self.dispatch if dispatch is None else dispatch\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n response_sent = anyio.Event()\n\n async def call_next(request: Request) -> Response:\n app_exc: typing.Optional[Exception] = None\n send_stream, recv_stream = anyio.create_memory_object_stream()\n\n async def receive_or_disconnect() -> Message:\n if response_sent.is_set():\n return {\"type\": \"http.disconnect\"}\n\n async with anyio.create_task_group() as task_group:\n\n async def wrap(func: typing.Callable[[], typing.Awaitable[T]]) -> T:\n result = await func()\n task_group.cancel_scope.cancel()\n return result\n\n task_group.start_soon(wrap, response_sent.wait)\n message = await wrap(request.receive)\n\n if response_sent.is_set():\n return 
{\"type\": \"http.disconnect\"}\n\n return message\n\n async def close_recv_stream_on_response_sent() -> None:\n await response_sent.wait()\n recv_stream.close()\n\n async def send_no_error(message: Message) -> None:\n try:\n await send_stream.send(message)\n except anyio.BrokenResourceError:\n # recv_stream has been closed, i.e. response_sent has been set.\n return\n\n async def coro() -> None:\n nonlocal app_exc\n\n async with send_stream:\n try:\n await self.app(scope, receive_or_disconnect, send_no_error)\n except Exception as exc:\n app_exc = exc\n\n task_group.start_soon(close_recv_stream_on_response_sent)\n task_group.start_soon(coro)\n\n try:\n message = await recv_stream.receive()\n except anyio.EndOfStream:\n if app_exc is not None:\n raise app_exc\n raise RuntimeError(\"No response returned.\")\n\n assert message[\"type\"] == \"http.response.start\"\n\n async def body_stream() -> typing.AsyncGenerator[bytes, None]:\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n if not message.get(\"more_body\", False):\n break\n\n if app_exc is not None:\n raise app_exc\n\n response = StreamingResponse(\n status_code=message[\"status\"], content=body_stream()\n )\n response.raw_headers = message[\"headers\"]\n return response\n\n async with anyio.create_task_group() as task_group:\n request = Request(scope, receive=receive)\n response = await self.dispatch_func(request, call_next)\n await response(scope, receive, send)\n response_sent.set()\n\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n raise NotImplementedError() # pragma: no cover\n", "path": "starlette/middleware/base.py"}], "after_files": [{"content": "import typing\n\nimport anyio\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response, StreamingResponse\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\nRequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]\nDispatchFunction = typing.Callable[\n [Request, RequestResponseEndpoint], typing.Awaitable[Response]\n]\nT = typing.TypeVar(\"T\")\n\n\nclass BaseHTTPMiddleware:\n def __init__(\n self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None\n ) -> None:\n self.app = app\n self.dispatch_func = self.dispatch if dispatch is None else dispatch\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n response_sent = anyio.Event()\n\n async def call_next(request: Request) -> Response:\n app_exc: typing.Optional[Exception] = None\n send_stream, recv_stream = anyio.create_memory_object_stream()\n\n async def receive_or_disconnect() -> Message:\n if response_sent.is_set():\n return {\"type\": \"http.disconnect\"}\n\n async with anyio.create_task_group() as task_group:\n\n async def wrap(func: typing.Callable[[], typing.Awaitable[T]]) -> T:\n result = await func()\n task_group.cancel_scope.cancel()\n return result\n\n task_group.start_soon(wrap, response_sent.wait)\n message = await wrap(request.receive)\n\n if response_sent.is_set():\n return {\"type\": \"http.disconnect\"}\n\n return message\n\n async def close_recv_stream_on_response_sent() -> None:\n await response_sent.wait()\n recv_stream.close()\n\n async def send_no_error(message: Message) -> None:\n try:\n await send_stream.send(message)\n except 
anyio.BrokenResourceError:\n # recv_stream has been closed, i.e. response_sent has been set.\n return\n\n async def coro() -> None:\n nonlocal app_exc\n\n async with send_stream:\n try:\n await self.app(scope, receive_or_disconnect, send_no_error)\n except Exception as exc:\n app_exc = exc\n\n task_group.start_soon(close_recv_stream_on_response_sent)\n task_group.start_soon(coro)\n\n try:\n message = await recv_stream.receive()\n except anyio.EndOfStream:\n if app_exc is not None:\n raise app_exc\n raise RuntimeError(\"No response returned.\")\n\n assert message[\"type\"] == \"http.response.start\"\n\n async def body_stream() -> typing.AsyncGenerator[bytes, None]:\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n\n if app_exc is not None:\n raise app_exc\n\n response = StreamingResponse(\n status_code=message[\"status\"], content=body_stream()\n )\n response.raw_headers = message[\"headers\"]\n return response\n\n async with anyio.create_task_group() as task_group:\n request = Request(scope, receive=receive)\n response = await self.dispatch_func(request, call_next)\n await response(scope, receive, send)\n response_sent.set()\n\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n raise NotImplementedError() # pragma: no cover\n", "path": "starlette/middleware/base.py"}]} | 1,682 | 96 |
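A regression-test sketch for the middleware fix above: it assumes Starlette's `TestClient` default of `raise_server_exceptions=True`, so once `body_stream()` no longer breaks out early the captured `app_exc` is re-raised and becomes visible to the caller. The app and middleware names mirror the reproduction in the issue and are otherwise arbitrary.

```python
import pytest
from fastapi import FastAPI
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.testclient import TestClient


class PassThroughMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request, call_next):
        return await call_next(request)


def build_app() -> FastAPI:
    app = FastAPI()
    private_api = FastAPI()

    @private_api.get("/info")
    def info():
        # Should be re-raised all the way up, not silently turned into a 500.
        raise Exception("boom")

    app.mount("/private", private_api)
    app.add_middleware(PassThroughMiddleware)
    return app


def test_subapp_exception_propagates_through_middleware():
    client = TestClient(build_app())
    # Pre-fix this returned a bare 500; post-fix the original exception
    # surfaces because app_exc is raised after the body stream is drained.
    with pytest.raises(Exception, match="boom"):
        client.get("/private/info")
```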
gh_patches_debug_59726 | rasdani/github-patches | git_diff | pytorch__audio-755 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🚀 Feature Request: Opus audio format support
## 🚀 Feature
<!-- -->
Add opus format
## Motivation
<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->
Opus is a good and popular lossy audio coding format. A lot of audio files are stored in opus format but it's impossible to use it with torchaudio.load().
## Pitch
<!-- A clear and concise description of what you want to happen. -->
Please, make possible to do
audio = torchaudio.load("my_audio.opus")
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `build_tools/setup_helpers/extension.py`
Content:
```
1 import os
2 import platform
3 import subprocess
4 from pathlib import Path
5
6 from torch.utils.cpp_extension import (
7 CppExtension,
8 BuildExtension as TorchBuildExtension
9 )
10
11 __all__ = [
12 'get_ext_modules',
13 'BuildExtension',
14 ]
15
16 _THIS_DIR = Path(__file__).parent.resolve()
17 _ROOT_DIR = _THIS_DIR.parent.parent.resolve()
18 _CSRC_DIR = _ROOT_DIR / 'torchaudio' / 'csrc'
19 _TP_BASE_DIR = _ROOT_DIR / 'third_party'
20 _TP_INSTALL_DIR = _TP_BASE_DIR / 'install'
21
22
23 def _get_build_sox():
24 val = os.environ.get('BUILD_SOX', '0')
25 trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']
26 falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']
27 if val in trues:
28 return True
29 if val not in falses:
30 print(
31 f'WARNING: Unexpected environment variable value `BUILD_SOX={val}`. '
32 f'Expected one of {trues + falses}')
33 return False
34
35
36 _BUILD_SOX = _get_build_sox()
37
38
39 def _get_eca(debug):
40 eca = []
41 if debug:
42 eca += ["-O0", "-g"]
43 else:
44 eca += ["-O3"]
45 return eca
46
47
48 def _get_ela(debug):
49 ela = []
50 if debug:
51 if platform.system() == "Windows":
52 ela += ["/DEBUG:FULL"]
53 else:
54 ela += ["-O0", "-g"]
55 else:
56 ela += ["-O3"]
57 return ela
58
59
60 def _get_srcs():
61 return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]
62
63
64 def _get_include_dirs():
65 dirs = [
66 str(_ROOT_DIR),
67 ]
68 if _BUILD_SOX:
69 dirs.append(str(_TP_INSTALL_DIR / 'include'))
70 return dirs
71
72
73 def _get_extra_objects():
74 objs = []
75 if _BUILD_SOX:
76 # NOTE: The order of the library listed bellow matters.
77 #
78 # (the most important thing is that dependencies come after a library
79 # e.g., sox comes first, flac/vorbis comes before ogg, and
80 # vorbisenc/vorbisfile comes before vorbis
81 libs = [
82 'libsox.a',
83 'libmad.a',
84 'libFLAC.a',
85 'libmp3lame.a',
86 'libvorbisenc.a',
87 'libvorbisfile.a',
88 'libvorbis.a',
89 'libogg.a',
90 ]
91 for lib in libs:
92 objs.append(str(_TP_INSTALL_DIR / 'lib' / lib))
93 return objs
94
95
96 def _get_libraries():
97 return [] if _BUILD_SOX else ['sox']
98
99
100 def _build_third_party():
101 build_dir = str(_TP_BASE_DIR / 'build')
102 os.makedirs(build_dir, exist_ok=True)
103 subprocess.run(
104 args=['cmake', '..'],
105 cwd=build_dir,
106 check=True,
107 )
108 subprocess.run(
109 args=['cmake', '--build', '.'],
110 cwd=build_dir,
111 check=True,
112 )
113
114
115 _EXT_NAME = 'torchaudio._torchaudio'
116
117
118 def get_ext_modules(debug=False):
119 if platform.system() == 'Windows':
120 return None
121 return [
122 CppExtension(
123 _EXT_NAME,
124 _get_srcs(),
125 libraries=_get_libraries(),
126 include_dirs=_get_include_dirs(),
127 extra_compile_args=_get_eca(debug),
128 extra_objects=_get_extra_objects(),
129 extra_link_args=_get_ela(debug),
130 ),
131 ]
132
133
134 class BuildExtension(TorchBuildExtension):
135 def build_extension(self, ext):
136 if ext.name == _EXT_NAME and _BUILD_SOX:
137 _build_third_party()
138 super().build_extension(ext)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/build_tools/setup_helpers/extension.py b/build_tools/setup_helpers/extension.py
--- a/build_tools/setup_helpers/extension.py
+++ b/build_tools/setup_helpers/extension.py
@@ -83,6 +83,8 @@
'libmad.a',
'libFLAC.a',
'libmp3lame.a',
+ 'libopusfile.a',
+ 'libopus.a',
'libvorbisenc.a',
'libvorbisfile.a',
'libvorbis.a',
| {"golden_diff": "diff --git a/build_tools/setup_helpers/extension.py b/build_tools/setup_helpers/extension.py\n--- a/build_tools/setup_helpers/extension.py\n+++ b/build_tools/setup_helpers/extension.py\n@@ -83,6 +83,8 @@\n 'libmad.a',\n 'libFLAC.a',\n 'libmp3lame.a',\n+ 'libopusfile.a',\n+ 'libopus.a',\n 'libvorbisenc.a',\n 'libvorbisfile.a',\n 'libvorbis.a',\n", "issue": "\ud83d\ude80 Feature Request: Opus audio format support\n## \ud83d\ude80 Feature\r\n<!-- -->\r\nAdd opus format \r\n## Motivation\r\n\r\n<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->\r\nOpus is a good and popular lossy audio coding format. A lot of audio files are stored in opus format but it's impossible to use it with torchaudio.load(). \r\n## Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. -->\r\nPlease, make possible to do \r\naudio = torchaudio.load(\"my_audio.opus\")\r\n\n", "before_files": [{"content": "import os\nimport platform\nimport subprocess\nfrom pathlib import Path\n\nfrom torch.utils.cpp_extension import (\n CppExtension,\n BuildExtension as TorchBuildExtension\n)\n\n__all__ = [\n 'get_ext_modules',\n 'BuildExtension',\n]\n\n_THIS_DIR = Path(__file__).parent.resolve()\n_ROOT_DIR = _THIS_DIR.parent.parent.resolve()\n_CSRC_DIR = _ROOT_DIR / 'torchaudio' / 'csrc'\n_TP_BASE_DIR = _ROOT_DIR / 'third_party'\n_TP_INSTALL_DIR = _TP_BASE_DIR / 'install'\n\n\ndef _get_build_sox():\n val = os.environ.get('BUILD_SOX', '0')\n trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']\n falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']\n if val in trues:\n return True\n if val not in falses:\n print(\n f'WARNING: Unexpected environment variable value `BUILD_SOX={val}`. 
'\n f'Expected one of {trues + falses}')\n return False\n\n\n_BUILD_SOX = _get_build_sox()\n\n\ndef _get_eca(debug):\n eca = []\n if debug:\n eca += [\"-O0\", \"-g\"]\n else:\n eca += [\"-O3\"]\n return eca\n\n\ndef _get_ela(debug):\n ela = []\n if debug:\n if platform.system() == \"Windows\":\n ela += [\"/DEBUG:FULL\"]\n else:\n ela += [\"-O0\", \"-g\"]\n else:\n ela += [\"-O3\"]\n return ela\n\n\ndef _get_srcs():\n return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]\n\n\ndef _get_include_dirs():\n dirs = [\n str(_ROOT_DIR),\n ]\n if _BUILD_SOX:\n dirs.append(str(_TP_INSTALL_DIR / 'include'))\n return dirs\n\n\ndef _get_extra_objects():\n objs = []\n if _BUILD_SOX:\n # NOTE: The order of the library listed bellow matters.\n #\n # (the most important thing is that dependencies come after a library\n # e.g., sox comes first, flac/vorbis comes before ogg, and\n # vorbisenc/vorbisfile comes before vorbis\n libs = [\n 'libsox.a',\n 'libmad.a',\n 'libFLAC.a',\n 'libmp3lame.a',\n 'libvorbisenc.a',\n 'libvorbisfile.a',\n 'libvorbis.a',\n 'libogg.a',\n ]\n for lib in libs:\n objs.append(str(_TP_INSTALL_DIR / 'lib' / lib))\n return objs\n\n\ndef _get_libraries():\n return [] if _BUILD_SOX else ['sox']\n\n\ndef _build_third_party():\n build_dir = str(_TP_BASE_DIR / 'build')\n os.makedirs(build_dir, exist_ok=True)\n subprocess.run(\n args=['cmake', '..'],\n cwd=build_dir,\n check=True,\n )\n subprocess.run(\n args=['cmake', '--build', '.'],\n cwd=build_dir,\n check=True,\n )\n\n\n_EXT_NAME = 'torchaudio._torchaudio'\n\n\ndef get_ext_modules(debug=False):\n if platform.system() == 'Windows':\n return None\n return [\n CppExtension(\n _EXT_NAME,\n _get_srcs(),\n libraries=_get_libraries(),\n include_dirs=_get_include_dirs(),\n extra_compile_args=_get_eca(debug),\n extra_objects=_get_extra_objects(),\n extra_link_args=_get_ela(debug),\n ),\n ]\n\n\nclass BuildExtension(TorchBuildExtension):\n def build_extension(self, ext):\n if ext.name == _EXT_NAME and _BUILD_SOX:\n _build_third_party()\n super().build_extension(ext)\n", "path": "build_tools/setup_helpers/extension.py"}], "after_files": [{"content": "import os\nimport platform\nimport subprocess\nfrom pathlib import Path\n\nfrom torch.utils.cpp_extension import (\n CppExtension,\n BuildExtension as TorchBuildExtension\n)\n\n__all__ = [\n 'get_ext_modules',\n 'BuildExtension',\n]\n\n_THIS_DIR = Path(__file__).parent.resolve()\n_ROOT_DIR = _THIS_DIR.parent.parent.resolve()\n_CSRC_DIR = _ROOT_DIR / 'torchaudio' / 'csrc'\n_TP_BASE_DIR = _ROOT_DIR / 'third_party'\n_TP_INSTALL_DIR = _TP_BASE_DIR / 'install'\n\n\ndef _get_build_sox():\n val = os.environ.get('BUILD_SOX', '0')\n trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']\n falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']\n if val in trues:\n return True\n if val not in falses:\n print(\n f'WARNING: Unexpected environment variable value `BUILD_SOX={val}`. 
'\n f'Expected one of {trues + falses}')\n return False\n\n\n_BUILD_SOX = _get_build_sox()\n\n\ndef _get_eca(debug):\n eca = []\n if debug:\n eca += [\"-O0\", \"-g\"]\n else:\n eca += [\"-O3\"]\n return eca\n\n\ndef _get_ela(debug):\n ela = []\n if debug:\n if platform.system() == \"Windows\":\n ela += [\"/DEBUG:FULL\"]\n else:\n ela += [\"-O0\", \"-g\"]\n else:\n ela += [\"-O3\"]\n return ela\n\n\ndef _get_srcs():\n return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]\n\n\ndef _get_include_dirs():\n dirs = [\n str(_ROOT_DIR),\n ]\n if _BUILD_SOX:\n dirs.append(str(_TP_INSTALL_DIR / 'include'))\n return dirs\n\n\ndef _get_extra_objects():\n objs = []\n if _BUILD_SOX:\n # NOTE: The order of the library listed bellow matters.\n #\n # (the most important thing is that dependencies come after a library\n # e.g., sox comes first, flac/vorbis comes before ogg, and\n # vorbisenc/vorbisfile comes before vorbis\n libs = [\n 'libsox.a',\n 'libmad.a',\n 'libFLAC.a',\n 'libmp3lame.a',\n 'libopusfile.a',\n 'libopus.a',\n 'libvorbisenc.a',\n 'libvorbisfile.a',\n 'libvorbis.a',\n 'libogg.a',\n ]\n for lib in libs:\n objs.append(str(_TP_INSTALL_DIR / 'lib' / lib))\n return objs\n\n\ndef _get_libraries():\n return [] if _BUILD_SOX else ['sox']\n\n\ndef _build_third_party():\n build_dir = str(_TP_BASE_DIR / 'build')\n os.makedirs(build_dir, exist_ok=True)\n subprocess.run(\n args=['cmake', '..'],\n cwd=build_dir,\n check=True,\n )\n subprocess.run(\n args=['cmake', '--build', '.'],\n cwd=build_dir,\n check=True,\n )\n\n\n_EXT_NAME = 'torchaudio._torchaudio'\n\n\ndef get_ext_modules(debug=False):\n if platform.system() == 'Windows':\n return None\n return [\n CppExtension(\n _EXT_NAME,\n _get_srcs(),\n libraries=_get_libraries(),\n include_dirs=_get_include_dirs(),\n extra_compile_args=_get_eca(debug),\n extra_objects=_get_extra_objects(),\n extra_link_args=_get_ela(debug),\n ),\n ]\n\n\nclass BuildExtension(TorchBuildExtension):\n def build_extension(self, ext):\n if ext.name == _EXT_NAME and _BUILD_SOX:\n _build_third_party()\n super().build_extension(ext)\n", "path": "build_tools/setup_helpers/extension.py"}]} | 1,590 | 115 |
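A usage sketch for the requested feature, assuming torchaudio was built with `BUILD_SOX=1` so the statically linked sox picks up the `libopus`/`libopusfile` objects added by the patch; the file name is a placeholder.

```python
import torchaudio

# With opus support compiled in, load() decodes .opus like any other format:
waveform, sample_rate = torchaudio.load("my_audio.opus")
print(waveform.shape, sample_rate)
```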
gh_patches_debug_39666 | rasdani/github-patches | git_diff | SeldonIO__MLServer-288 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consider using `tobytes()` instead of `tolist()` in numpy codec for performance improvement
Our use-case is to make multiple requests to MLServer and all of them will have a 640x640 image data in numpy format as input.
Given that `tobytes()` is way faster than `tolist()`, does it make sense to support it by default in the numpy codec here: https://github.com/SeldonIO/MLServer/blob/61d9f078d1f886d14083551f33db6a3146f12745/mlserver/codecs/numpy.py#L54
----
Performance comparison of the two methods:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/codecs/numpy.py`
Content:
```
1 import numpy as np
2
3 from ..types import RequestInput, ResponseOutput
4
5 from .base import InputCodec, register_input_codec, register_request_codec
6 from .utils import FirstInputRequestCodec
7
8 _DatatypeToNumpy = {
9 "BOOL": "bool",
10 "UINT8": "uint8",
11 "UINT16": "uint16",
12 "UINT32": "uint32",
13 "UINT64": "uint64",
14 "INT8": "int8",
15 "INT16": "int16",
16 "INT32": "int32",
17 "INT64": "int64",
18 "FP16": "float16",
19 "FP32": "float32",
20 "FP64": "float64",
21 "BYTES": "byte",
22 }
23
24 _NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}
25
26 # NOTE: numpy has more types than v2 protocol
27 _NumpyToDatatype["object"] = "BYTES"
28
29
30 def _to_dtype(datatype: str) -> "np.dtype":
31 dtype = _DatatypeToNumpy[datatype]
32 return np.dtype(dtype)
33
34
35 def _to_datatype(dtype: np.dtype) -> str:
36 as_str = str(dtype)
37 datatype = _NumpyToDatatype[as_str]
38
39 return datatype
40
41
42 @register_input_codec
43 class NumpyCodec(InputCodec):
44 """
45 Encodes a tensor as a numpy array.
46 """
47
48 ContentType = "np"
49
50 @classmethod
51 def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:
52 return ResponseOutput(
53 name=name,
54 datatype=_to_datatype(payload.dtype),
55 shape=list(payload.shape),
56 data=payload.flatten().tolist(),
57 )
58
59 @classmethod
60 def decode(cls, request_input: RequestInput) -> np.ndarray:
61 dtype = _to_dtype(request_input.datatype)
62 data = getattr(request_input.data, "__root__", request_input.data)
63
64 model_data = np.array(data, dtype=dtype)
65
66 # TODO: Check if reshape not valid
67 return model_data.reshape(request_input.shape)
68
69
70 @register_request_codec
71 class NumpyRequestCodec(FirstInputRequestCodec):
72 InputCodec = NumpyCodec
73 ContentType = NumpyCodec.ContentType
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/codecs/numpy.py b/mlserver/codecs/numpy.py
--- a/mlserver/codecs/numpy.py
+++ b/mlserver/codecs/numpy.py
@@ -1,5 +1,7 @@
import numpy as np
+from typing import Any
+
from ..types import RequestInput, ResponseOutput
from .base import InputCodec, register_input_codec, register_request_codec
@@ -18,27 +20,59 @@
"FP16": "float16",
"FP32": "float32",
"FP64": "float64",
- "BYTES": "byte",
+ "BYTES": "bytes",
}
_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}
# NOTE: numpy has more types than v2 protocol
_NumpyToDatatype["object"] = "BYTES"
+_NumpyToDatatype["S"] = "BYTES"
+
+def _to_dtype(request_input: RequestInput) -> "np.dtype":
+ dtype = _DatatypeToNumpy[request_input.datatype]
+
+ if request_input.datatype == "BYTES":
+ # bytes have variable size, so need to specify as part of type
+ # TODO: Make elem size variable (and not just the last dimension)
+ elem_size = request_input.shape[-1]
+ return np.dtype((dtype, elem_size))
-def _to_dtype(datatype: str) -> "np.dtype":
- dtype = _DatatypeToNumpy[datatype]
return np.dtype(dtype)
def _to_datatype(dtype: np.dtype) -> str:
as_str = str(dtype)
+
+ if as_str not in _NumpyToDatatype:
+ # If not present, try with kind
+ as_str = getattr(dtype, "kind")
+
datatype = _NumpyToDatatype[as_str]
return datatype
+def _to_ndarray(request_input: RequestInput) -> np.ndarray:
+ data = getattr(request_input.data, "__root__", request_input.data)
+ dtype = _to_dtype(request_input)
+
+ if request_input.datatype == "BYTES":
+ return np.frombuffer(data, dtype)
+
+ return np.array(data, dtype)
+
+
+def _encode_data(data: np.ndarray, datatype: str) -> Any:
+ if datatype == "BYTES":
+ # tobytes is way faster than tolist, although it's harder to serialise
+ # and only makes sense for actual bytes inputs (#253)
+ return data.tobytes()
+
+ return data.flatten().tolist()
+
+
@register_input_codec
class NumpyCodec(InputCodec):
"""
@@ -49,19 +83,18 @@
@classmethod
def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:
+ datatype = _to_datatype(payload.dtype)
+
return ResponseOutput(
name=name,
- datatype=_to_datatype(payload.dtype),
+ datatype=datatype,
shape=list(payload.shape),
- data=payload.flatten().tolist(),
+ data=_encode_data(payload, datatype),
)
@classmethod
def decode(cls, request_input: RequestInput) -> np.ndarray:
- dtype = _to_dtype(request_input.datatype)
- data = getattr(request_input.data, "__root__", request_input.data)
-
- model_data = np.array(data, dtype=dtype)
+ model_data = _to_ndarray(request_input)
# TODO: Check if reshape not valid
return model_data.reshape(request_input.shape)
| {"golden_diff": "diff --git a/mlserver/codecs/numpy.py b/mlserver/codecs/numpy.py\n--- a/mlserver/codecs/numpy.py\n+++ b/mlserver/codecs/numpy.py\n@@ -1,5 +1,7 @@\n import numpy as np\n \n+from typing import Any\n+\n from ..types import RequestInput, ResponseOutput\n \n from .base import InputCodec, register_input_codec, register_request_codec\n@@ -18,27 +20,59 @@\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n- \"BYTES\": \"byte\",\n+ \"BYTES\": \"bytes\",\n }\n \n _NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n \n # NOTE: numpy has more types than v2 protocol\n _NumpyToDatatype[\"object\"] = \"BYTES\"\n+_NumpyToDatatype[\"S\"] = \"BYTES\"\n+\n \n+def _to_dtype(request_input: RequestInput) -> \"np.dtype\":\n+ dtype = _DatatypeToNumpy[request_input.datatype]\n+\n+ if request_input.datatype == \"BYTES\":\n+ # bytes have variable size, so need to specify as part of type\n+ # TODO: Make elem size variable (and not just the last dimension)\n+ elem_size = request_input.shape[-1]\n+ return np.dtype((dtype, elem_size))\n \n-def _to_dtype(datatype: str) -> \"np.dtype\":\n- dtype = _DatatypeToNumpy[datatype]\n return np.dtype(dtype)\n \n \n def _to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n+\n+ if as_str not in _NumpyToDatatype:\n+ # If not present, try with kind\n+ as_str = getattr(dtype, \"kind\")\n+\n datatype = _NumpyToDatatype[as_str]\n \n return datatype\n \n \n+def _to_ndarray(request_input: RequestInput) -> np.ndarray:\n+ data = getattr(request_input.data, \"__root__\", request_input.data)\n+ dtype = _to_dtype(request_input)\n+\n+ if request_input.datatype == \"BYTES\":\n+ return np.frombuffer(data, dtype)\n+\n+ return np.array(data, dtype)\n+\n+\n+def _encode_data(data: np.ndarray, datatype: str) -> Any:\n+ if datatype == \"BYTES\":\n+ # tobytes is way faster than tolist, although it's harder to serialise\n+ # and only makes sense for actual bytes inputs (#253)\n+ return data.tobytes()\n+\n+ return data.flatten().tolist()\n+\n+\n @register_input_codec\n class NumpyCodec(InputCodec):\n \"\"\"\n@@ -49,19 +83,18 @@\n \n @classmethod\n def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:\n+ datatype = _to_datatype(payload.dtype)\n+\n return ResponseOutput(\n name=name,\n- datatype=_to_datatype(payload.dtype),\n+ datatype=datatype,\n shape=list(payload.shape),\n- data=payload.flatten().tolist(),\n+ data=_encode_data(payload, datatype),\n )\n \n @classmethod\n def decode(cls, request_input: RequestInput) -> np.ndarray:\n- dtype = _to_dtype(request_input.datatype)\n- data = getattr(request_input.data, \"__root__\", request_input.data)\n-\n- model_data = np.array(data, dtype=dtype)\n+ model_data = _to_ndarray(request_input)\n \n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n", "issue": "Consider using `tobytes()` instead of `tolist()` in numpy codec for performance improvement\nOur use-case is to make multiple requests to MLServer and all of them will have a 640x640 image data in numpy format as input.\r\n\r\nGiven that `tobytes()` is way faster than `tolist()` does it make sense to support it by default in the numpy codec here: https://github.com/SeldonIO/MLServer/blob/61d9f078d1f886d14083551f33db6a3146f12745/mlserver/codecs/numpy.py#L54\r\n\r\n----\r\n\r\nPerformance comparison of the two methods:\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom ..types import RequestInput, ResponseOutput\n\nfrom .base import InputCodec, register_input_codec, 
register_request_codec\nfrom .utils import FirstInputRequestCodec\n\n_DatatypeToNumpy = {\n \"BOOL\": \"bool\",\n \"UINT8\": \"uint8\",\n \"UINT16\": \"uint16\",\n \"UINT32\": \"uint32\",\n \"UINT64\": \"uint64\",\n \"INT8\": \"int8\",\n \"INT16\": \"int16\",\n \"INT32\": \"int32\",\n \"INT64\": \"int64\",\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n \"BYTES\": \"byte\",\n}\n\n_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n\n# NOTE: numpy has more types than v2 protocol\n_NumpyToDatatype[\"object\"] = \"BYTES\"\n\n\ndef _to_dtype(datatype: str) -> \"np.dtype\":\n dtype = _DatatypeToNumpy[datatype]\n return np.dtype(dtype)\n\n\ndef _to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n datatype = _NumpyToDatatype[as_str]\n\n return datatype\n\n\n@register_input_codec\nclass NumpyCodec(InputCodec):\n \"\"\"\n Encodes a tensor as a numpy array.\n \"\"\"\n\n ContentType = \"np\"\n\n @classmethod\n def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:\n return ResponseOutput(\n name=name,\n datatype=_to_datatype(payload.dtype),\n shape=list(payload.shape),\n data=payload.flatten().tolist(),\n )\n\n @classmethod\n def decode(cls, request_input: RequestInput) -> np.ndarray:\n dtype = _to_dtype(request_input.datatype)\n data = getattr(request_input.data, \"__root__\", request_input.data)\n\n model_data = np.array(data, dtype=dtype)\n\n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n\n\n@register_request_codec\nclass NumpyRequestCodec(FirstInputRequestCodec):\n InputCodec = NumpyCodec\n ContentType = NumpyCodec.ContentType\n", "path": "mlserver/codecs/numpy.py"}], "after_files": [{"content": "import numpy as np\n\nfrom typing import Any\n\nfrom ..types import RequestInput, ResponseOutput\n\nfrom .base import InputCodec, register_input_codec, register_request_codec\nfrom .utils import FirstInputRequestCodec\n\n_DatatypeToNumpy = {\n \"BOOL\": \"bool\",\n \"UINT8\": \"uint8\",\n \"UINT16\": \"uint16\",\n \"UINT32\": \"uint32\",\n \"UINT64\": \"uint64\",\n \"INT8\": \"int8\",\n \"INT16\": \"int16\",\n \"INT32\": \"int32\",\n \"INT64\": \"int64\",\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n \"BYTES\": \"bytes\",\n}\n\n_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n\n# NOTE: numpy has more types than v2 protocol\n_NumpyToDatatype[\"object\"] = \"BYTES\"\n_NumpyToDatatype[\"S\"] = \"BYTES\"\n\n\ndef _to_dtype(request_input: RequestInput) -> \"np.dtype\":\n dtype = _DatatypeToNumpy[request_input.datatype]\n\n if request_input.datatype == \"BYTES\":\n # bytes have variable size, so need to specify as part of type\n # TODO: Make elem size variable (and not just the last dimension)\n elem_size = request_input.shape[-1]\n return np.dtype((dtype, elem_size))\n\n return np.dtype(dtype)\n\n\ndef _to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n\n if as_str not in _NumpyToDatatype:\n # If not present, try with kind\n as_str = getattr(dtype, \"kind\")\n\n datatype = _NumpyToDatatype[as_str]\n\n return datatype\n\n\ndef _to_ndarray(request_input: RequestInput) -> np.ndarray:\n data = getattr(request_input.data, \"__root__\", request_input.data)\n dtype = _to_dtype(request_input)\n\n if request_input.datatype == \"BYTES\":\n return np.frombuffer(data, dtype)\n\n return np.array(data, dtype)\n\n\ndef _encode_data(data: np.ndarray, datatype: str) -> Any:\n if datatype == \"BYTES\":\n # tobytes is way faster than tolist, although 
it's harder to serialise\n # and only makes sense for actual bytes inputs (#253)\n return data.tobytes()\n\n return data.flatten().tolist()\n\n\n@register_input_codec\nclass NumpyCodec(InputCodec):\n \"\"\"\n Encodes a tensor as a numpy array.\n \"\"\"\n\n ContentType = \"np\"\n\n @classmethod\n def encode(cls, name: str, payload: np.ndarray) -> ResponseOutput:\n datatype = _to_datatype(payload.dtype)\n\n return ResponseOutput(\n name=name,\n datatype=datatype,\n shape=list(payload.shape),\n data=_encode_data(payload, datatype),\n )\n\n @classmethod\n def decode(cls, request_input: RequestInput) -> np.ndarray:\n model_data = _to_ndarray(request_input)\n\n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n\n\n@register_request_codec\nclass NumpyRequestCodec(FirstInputRequestCodec):\n InputCodec = NumpyCodec\n ContentType = NumpyCodec.ContentType\n", "path": "mlserver/codecs/numpy.py"}]} | 1,152 | 796 |
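A rough micro-benchmark sketch backing the performance claim in the issue; the 640x640 `uint8` array stands in for the image payload described there, and absolute numbers will vary by machine.

```python
import timeit

import numpy as np

image = np.random.randint(0, 256, size=(640, 640), dtype=np.uint8)

# tolist() materialises one Python int per element; tobytes() is a single
# contiguous memory copy, which is why it wins by a wide margin.
tolist_s = timeit.timeit(lambda: image.flatten().tolist(), number=100)
tobytes_s = timeit.timeit(lambda: image.tobytes(), number=100)

print(f"tolist : {tolist_s:.4f}s / 100 calls")
print(f"tobytes: {tobytes_s:.4f}s / 100 calls")
```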
gh_patches_debug_40713 | rasdani/github-patches | git_diff | translate__pootle-3806 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document the dump management command
`dump` looks like it could be useful for development. But I'm not 100% sure what data I'm looking at when I execute it. @ta2-1 I suspect this one might be yours. Let me know how it works and I'll happily document it. Or the other option is to remove it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_app/management/commands/dump.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10
11 import os
12
13 import sys
14 reload(sys)
15 sys.setdefaultencoding('utf-8')
16
17 os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
18
19 from optparse import make_option
20
21 from pootle.core.utils.json import jsonify
22 from pootle_app.management.commands import PootleCommand
23 from pootle_app.models import Directory
24 from pootle_project.models import Project
25
26 DUMPED = {
27 'TranslationProject': ('pootle_path', 'real_path', 'disabled'),
28 'Store': ('file', 'translation_project', 'pootle_path', 'name', 'state'),
29 'Directory': ('name', 'parent', 'pootle_path'),
30 'Unit': ('source', 'target', 'source_wordcount', 'target_wordcount',
31 'developer_comment', 'translator_comment', 'locations',
32 'isobsolete', 'isfuzzy', 'istranslated'),
33 'Suggestion': ('target_f', 'user_id'),
34 'Language': ('code', 'fullname', 'pootle_path'),
35 'Project': ('code', 'fullname', 'checkstyle', 'localfiletype',
36 'treestyle', 'source_language', 'ignoredfiles',
37 'screenshot_search_prefix', 'disabled')
38 }
39
40
41 class Command(PootleCommand):
42 help = "Dump data."
43
44 shared_option_list = (
45 make_option('--stats', action='store_true', dest='stats',
46 help='Dump stats'),
47 make_option('--data', action='store_true', dest='data',
48 help='Data all data'),
49 make_option('--stop-level', action='store', dest='stop_level',
50 default=-1),
51 )
52 option_list = PootleCommand.option_list + shared_option_list
53
54 def handle_all(self, **options):
55 if not self.projects and not self.languages:
56 stats = options.get('stats', False)
57 data = options.get('data', False)
58 stop_level = int(options.get('stop_level', -1))
59 if stats:
60 self.dump_stats(stop_level=stop_level)
61 if data:
62 self.dump_all(stop_level=stop_level)
63 else:
64 super(Command, self).handle_all(**options)
65
66 def handle_translation_project(self, tp, **options):
67 stats = options.get('stats', False)
68 data = options.get('data', False)
69 stop_level = int(options.get('stop_level', -1))
70 if stats:
71 res = {}
72 self._dump_stats(tp.directory, res, stop_level=stop_level)
73
74 stats_dump = jsonify(res)
75 self.stdout.write(stats_dump)
76 if data:
77 self._dump_item(tp.directory, 0, stop_level=stop_level)
78
79 def dump_stats(self, stop_level):
80 res = {}
81 for prj in Project.objects.all():
82 self._dump_stats(prj, res, stop_level=stop_level)
83
84 for key, item in res.items():
85 out = u"%s %s,%s,%s,%s,%s,%s,%s,%s" % \
86 (key, item['total'], item['translated'], item['fuzzy'],
87 item['suggestions'], item['critical'], item['is_dirty'],
88 item['lastaction']['id'], item['lastupdated']['id'])
89
90 self.stdout.write(out)
91
92 def _dump_stats(self, item, res, stop_level):
93 key = item.get_cachekey()
94 item.initialize_children()
95
96 if stop_level != 0 and item.children:
97 if stop_level > 0:
98 stop_level = stop_level - 1
99 for child in item.children:
100 self._dump_stats(child, res,
101 stop_level=stop_level)
102
103 res[key] = (item.get_stats(include_children=False))
104
105 def dump_all(self, stop_level):
106 root = Directory.objects.root
107 self._dump_item(root, 0, stop_level=stop_level)
108
109 def _dump_item(self, item, level, stop_level):
110 self.stdout.write(self.dumped(item))
111 if item.is_dir:
112 # item is a Directory
113 if item.is_project():
114 self.stdout.write(self.dumped(item.project))
115 elif item.is_language():
116 self.stdout.write(self.dumped(item.language))
117 elif item.is_translationproject():
118 try:
119 self.stdout.write(self.dumped(item.translationproject))
120 except:
121 pass
122 else:
123 # item should be a Store
124 for unit in item.units:
125 self.stdout.write(self.dumped(unit))
126 for sg in unit.get_suggestions():
127 self.stdout.write(self.dumped(sg))
128
129 if stop_level != level:
130 item.initialize_children()
131 if item.children:
132 for child in item.children:
133 self._dump_item(child, level + 1, stop_level=stop_level)
134
135 def dumped(self, item):
136 def get_param(param):
137 p = getattr(item, param)
138 res = p() if callable(p) else p
139 res = u"%s" % res
140 res = res.replace('\n', '\\n')
141 return (param, res)
142
143 return u"%d:%s\t%s" % \
144 (
145 item.id,
146 item._meta.object_name,
147 "\t".join(
148 u"%s=%s" % (k, v)
149 for k, v in map(get_param, DUMPED[item._meta.object_name])
150 )
151 )
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_app/management/commands/dump.py b/pootle/apps/pootle_app/management/commands/dump.py
--- a/pootle/apps/pootle_app/management/commands/dump.py
+++ b/pootle/apps/pootle_app/management/commands/dump.py
@@ -18,7 +18,8 @@
from optparse import make_option
-from pootle.core.utils.json import jsonify
+from django.core.management.base import CommandError
+
from pootle_app.management.commands import PootleCommand
from pootle_app.models import Directory
from pootle_project.models import Project
@@ -56,10 +57,15 @@
stats = options.get('stats', False)
data = options.get('data', False)
stop_level = int(options.get('stop_level', -1))
+
if stats:
self.dump_stats(stop_level=stop_level)
+ return
if data:
self.dump_all(stop_level=stop_level)
+ return
+
+ raise CommandError("Set --data or --stats option.")
else:
super(Command, self).handle_all(**options)
@@ -70,25 +76,19 @@
if stats:
res = {}
self._dump_stats(tp.directory, res, stop_level=stop_level)
+ return
- stats_dump = jsonify(res)
- self.stdout.write(stats_dump)
if data:
self._dump_item(tp.directory, 0, stop_level=stop_level)
+ return
+
+ raise CommandError("Set --data or --stats option.")
def dump_stats(self, stop_level):
res = {}
for prj in Project.objects.all():
self._dump_stats(prj, res, stop_level=stop_level)
- for key, item in res.items():
- out = u"%s %s,%s,%s,%s,%s,%s,%s,%s" % \
- (key, item['total'], item['translated'], item['fuzzy'],
- item['suggestions'], item['critical'], item['is_dirty'],
- item['lastaction']['id'], item['lastupdated']['id'])
-
- self.stdout.write(out)
-
def _dump_stats(self, item, res, stop_level):
key = item.get_cachekey()
item.initialize_children()
@@ -102,6 +102,24 @@
res[key] = (item.get_stats(include_children=False))
+ if res[key]['lastaction']:
+ last_action_id = res[key]['lastaction']['id']
+ else:
+ last_action_id = None
+
+ if res[key]['lastupdated']:
+ last_updated_id = res[key]['lastupdated']['id']
+ else:
+ last_updated_id = None
+
+ out = u"%s %s,%s,%s,%s,%s,%s,%s,%s" % \
+ (key, res[key]['total'], res[key]['translated'],
+ res[key]['fuzzy'], res[key]['suggestions'],
+ res[key]['critical'], res[key]['is_dirty'],
+ last_action_id, last_updated_id)
+
+ self.stdout.write(out)
+
def dump_all(self, stop_level):
root = Directory.objects.root
self._dump_item(root, 0, stop_level=stop_level)
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/dump.py b/pootle/apps/pootle_app/management/commands/dump.py\n--- a/pootle/apps/pootle_app/management/commands/dump.py\n+++ b/pootle/apps/pootle_app/management/commands/dump.py\n@@ -18,7 +18,8 @@\n \n from optparse import make_option\n \n-from pootle.core.utils.json import jsonify\n+from django.core.management.base import CommandError\n+\n from pootle_app.management.commands import PootleCommand\n from pootle_app.models import Directory\n from pootle_project.models import Project\n@@ -56,10 +57,15 @@\n stats = options.get('stats', False)\n data = options.get('data', False)\n stop_level = int(options.get('stop_level', -1))\n+\n if stats:\n self.dump_stats(stop_level=stop_level)\n+ return\n if data:\n self.dump_all(stop_level=stop_level)\n+ return\n+\n+ raise CommandError(\"Set --data or --stats option.\")\n else:\n super(Command, self).handle_all(**options)\n \n@@ -70,25 +76,19 @@\n if stats:\n res = {}\n self._dump_stats(tp.directory, res, stop_level=stop_level)\n+ return\n \n- stats_dump = jsonify(res)\n- self.stdout.write(stats_dump)\n if data:\n self._dump_item(tp.directory, 0, stop_level=stop_level)\n+ return\n+\n+ raise CommandError(\"Set --data or --stats option.\")\n \n def dump_stats(self, stop_level):\n res = {}\n for prj in Project.objects.all():\n self._dump_stats(prj, res, stop_level=stop_level)\n \n- for key, item in res.items():\n- out = u\"%s %s,%s,%s,%s,%s,%s,%s,%s\" % \\\n- (key, item['total'], item['translated'], item['fuzzy'],\n- item['suggestions'], item['critical'], item['is_dirty'],\n- item['lastaction']['id'], item['lastupdated']['id'])\n-\n- self.stdout.write(out)\n-\n def _dump_stats(self, item, res, stop_level):\n key = item.get_cachekey()\n item.initialize_children()\n@@ -102,6 +102,24 @@\n \n res[key] = (item.get_stats(include_children=False))\n \n+ if res[key]['lastaction']:\n+ last_action_id = res[key]['lastaction']['id']\n+ else:\n+ last_action_id = None\n+\n+ if res[key]['lastupdated']:\n+ last_updated_id = res[key]['lastupdated']['id']\n+ else:\n+ last_updated_id = None\n+\n+ out = u\"%s %s,%s,%s,%s,%s,%s,%s,%s\" % \\\n+ (key, res[key]['total'], res[key]['translated'],\n+ res[key]['fuzzy'], res[key]['suggestions'],\n+ res[key]['critical'], res[key]['is_dirty'],\n+ last_action_id, last_updated_id)\n+\n+ self.stdout.write(out)\n+\n def dump_all(self, stop_level):\n root = Directory.objects.root\n self._dump_item(root, 0, stop_level=stop_level)\n", "issue": "Document the dump management command\n`dump` looks like it could be useful for development. But I'm not 100% sure what data I'm looking at when I execute it. @ta2-1 I suspect this one might be yours. Let me know how it works and I'll happily document it. Or the other option is to remove it.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\nimport os\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom optparse import make_option\n\nfrom pootle.core.utils.json import jsonify\nfrom pootle_app.management.commands import PootleCommand\nfrom pootle_app.models import Directory\nfrom pootle_project.models import Project\n\nDUMPED = {\n 'TranslationProject': ('pootle_path', 'real_path', 'disabled'),\n 'Store': ('file', 'translation_project', 'pootle_path', 'name', 'state'),\n 'Directory': ('name', 'parent', 'pootle_path'),\n 'Unit': ('source', 'target', 'source_wordcount', 'target_wordcount',\n 'developer_comment', 'translator_comment', 'locations',\n 'isobsolete', 'isfuzzy', 'istranslated'),\n 'Suggestion': ('target_f', 'user_id'),\n 'Language': ('code', 'fullname', 'pootle_path'),\n 'Project': ('code', 'fullname', 'checkstyle', 'localfiletype',\n 'treestyle', 'source_language', 'ignoredfiles',\n 'screenshot_search_prefix', 'disabled')\n}\n\n\nclass Command(PootleCommand):\n help = \"Dump data.\"\n\n shared_option_list = (\n make_option('--stats', action='store_true', dest='stats',\n help='Dump stats'),\n make_option('--data', action='store_true', dest='data',\n help='Data all data'),\n make_option('--stop-level', action='store', dest='stop_level',\n default=-1),\n )\n option_list = PootleCommand.option_list + shared_option_list\n\n def handle_all(self, **options):\n if not self.projects and not self.languages:\n stats = options.get('stats', False)\n data = options.get('data', False)\n stop_level = int(options.get('stop_level', -1))\n if stats:\n self.dump_stats(stop_level=stop_level)\n if data:\n self.dump_all(stop_level=stop_level)\n else:\n super(Command, self).handle_all(**options)\n\n def handle_translation_project(self, tp, **options):\n stats = options.get('stats', False)\n data = options.get('data', False)\n stop_level = int(options.get('stop_level', -1))\n if stats:\n res = {}\n self._dump_stats(tp.directory, res, stop_level=stop_level)\n\n stats_dump = jsonify(res)\n self.stdout.write(stats_dump)\n if data:\n self._dump_item(tp.directory, 0, stop_level=stop_level)\n\n def dump_stats(self, stop_level):\n res = {}\n for prj in Project.objects.all():\n self._dump_stats(prj, res, stop_level=stop_level)\n\n for key, item in res.items():\n out = u\"%s %s,%s,%s,%s,%s,%s,%s,%s\" % \\\n (key, item['total'], item['translated'], item['fuzzy'],\n item['suggestions'], item['critical'], item['is_dirty'],\n item['lastaction']['id'], item['lastupdated']['id'])\n\n self.stdout.write(out)\n\n def _dump_stats(self, item, res, stop_level):\n key = item.get_cachekey()\n item.initialize_children()\n\n if stop_level != 0 and item.children:\n if stop_level > 0:\n stop_level = stop_level - 1\n for child in item.children:\n self._dump_stats(child, res,\n stop_level=stop_level)\n\n res[key] = (item.get_stats(include_children=False))\n\n def dump_all(self, stop_level):\n root = Directory.objects.root\n self._dump_item(root, 0, stop_level=stop_level)\n\n def _dump_item(self, item, level, stop_level):\n self.stdout.write(self.dumped(item))\n if item.is_dir:\n # item is a Directory\n if item.is_project():\n self.stdout.write(self.dumped(item.project))\n elif item.is_language():\n self.stdout.write(self.dumped(item.language))\n elif item.is_translationproject():\n try:\n self.stdout.write(self.dumped(item.translationproject))\n except:\n pass\n else:\n # item 
should be a Store\n for unit in item.units:\n self.stdout.write(self.dumped(unit))\n for sg in unit.get_suggestions():\n self.stdout.write(self.dumped(sg))\n\n if stop_level != level:\n item.initialize_children()\n if item.children:\n for child in item.children:\n self._dump_item(child, level + 1, stop_level=stop_level)\n\n def dumped(self, item):\n def get_param(param):\n p = getattr(item, param)\n res = p() if callable(p) else p\n res = u\"%s\" % res\n res = res.replace('\\n', '\\\\n')\n return (param, res)\n\n return u\"%d:%s\\t%s\" % \\\n (\n item.id,\n item._meta.object_name,\n \"\\t\".join(\n u\"%s=%s\" % (k, v)\n for k, v in map(get_param, DUMPED[item._meta.object_name])\n )\n )\n", "path": "pootle/apps/pootle_app/management/commands/dump.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\nimport os\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom optparse import make_option\n\nfrom django.core.management.base import CommandError\n\nfrom pootle_app.management.commands import PootleCommand\nfrom pootle_app.models import Directory\nfrom pootle_project.models import Project\n\nDUMPED = {\n 'TranslationProject': ('pootle_path', 'real_path', 'disabled'),\n 'Store': ('file', 'translation_project', 'pootle_path', 'name', 'state'),\n 'Directory': ('name', 'parent', 'pootle_path'),\n 'Unit': ('source', 'target', 'source_wordcount', 'target_wordcount',\n 'developer_comment', 'translator_comment', 'locations',\n 'isobsolete', 'isfuzzy', 'istranslated'),\n 'Suggestion': ('target_f', 'user_id'),\n 'Language': ('code', 'fullname', 'pootle_path'),\n 'Project': ('code', 'fullname', 'checkstyle', 'localfiletype',\n 'treestyle', 'source_language', 'ignoredfiles',\n 'screenshot_search_prefix', 'disabled')\n}\n\n\nclass Command(PootleCommand):\n help = \"Dump data.\"\n\n shared_option_list = (\n make_option('--stats', action='store_true', dest='stats',\n help='Dump stats'),\n make_option('--data', action='store_true', dest='data',\n help='Data all data'),\n make_option('--stop-level', action='store', dest='stop_level',\n default=-1),\n )\n option_list = PootleCommand.option_list + shared_option_list\n\n def handle_all(self, **options):\n if not self.projects and not self.languages:\n stats = options.get('stats', False)\n data = options.get('data', False)\n stop_level = int(options.get('stop_level', -1))\n\n if stats:\n self.dump_stats(stop_level=stop_level)\n return\n if data:\n self.dump_all(stop_level=stop_level)\n return\n\n raise CommandError(\"Set --data or --stats option.\")\n else:\n super(Command, self).handle_all(**options)\n\n def handle_translation_project(self, tp, **options):\n stats = options.get('stats', False)\n data = options.get('data', False)\n stop_level = int(options.get('stop_level', -1))\n if stats:\n res = {}\n self._dump_stats(tp.directory, res, stop_level=stop_level)\n return\n\n if data:\n self._dump_item(tp.directory, 0, stop_level=stop_level)\n return\n\n raise CommandError(\"Set --data or --stats option.\")\n\n def dump_stats(self, stop_level):\n res = {}\n for prj in Project.objects.all():\n self._dump_stats(prj, res, stop_level=stop_level)\n\n def _dump_stats(self, item, res, stop_level):\n key = 
item.get_cachekey()\n item.initialize_children()\n\n if stop_level != 0 and item.children:\n if stop_level > 0:\n stop_level = stop_level - 1\n for child in item.children:\n self._dump_stats(child, res,\n stop_level=stop_level)\n\n res[key] = (item.get_stats(include_children=False))\n\n if res[key]['lastaction']:\n last_action_id = res[key]['lastaction']['id']\n else:\n last_action_id = None\n\n if res[key]['lastupdated']:\n last_updated_id = res[key]['lastupdated']['id']\n else:\n last_updated_id = None\n\n out = u\"%s %s,%s,%s,%s,%s,%s,%s,%s\" % \\\n (key, res[key]['total'], res[key]['translated'],\n res[key]['fuzzy'], res[key]['suggestions'],\n res[key]['critical'], res[key]['is_dirty'],\n last_action_id, last_updated_id)\n\n self.stdout.write(out)\n\n def dump_all(self, stop_level):\n root = Directory.objects.root\n self._dump_item(root, 0, stop_level=stop_level)\n\n def _dump_item(self, item, level, stop_level):\n self.stdout.write(self.dumped(item))\n if item.is_dir:\n # item is a Directory\n if item.is_project():\n self.stdout.write(self.dumped(item.project))\n elif item.is_language():\n self.stdout.write(self.dumped(item.language))\n elif item.is_translationproject():\n try:\n self.stdout.write(self.dumped(item.translationproject))\n except:\n pass\n else:\n # item should be a Store\n for unit in item.units:\n self.stdout.write(self.dumped(unit))\n for sg in unit.get_suggestions():\n self.stdout.write(self.dumped(sg))\n\n if stop_level != level:\n item.initialize_children()\n if item.children:\n for child in item.children:\n self._dump_item(child, level + 1, stop_level=stop_level)\n\n def dumped(self, item):\n def get_param(param):\n p = getattr(item, param)\n res = p() if callable(p) else p\n res = u\"%s\" % res\n res = res.replace('\\n', '\\\\n')\n return (param, res)\n\n return u\"%d:%s\\t%s\" % \\\n (\n item.id,\n item._meta.object_name,\n \"\\t\".join(\n u\"%s=%s\" % (k, v)\n for k, v in map(get_param, DUMPED[item._meta.object_name])\n )\n )\n", "path": "pootle/apps/pootle_app/management/commands/dump.py"}]} | 1,918 | 742 |
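A usage sketch for the `dump` command whose documentation the issue asks for; it assumes Pootle's standard Django settings are importable, and `call_command` mirrors what `manage.py dump ...` would do with the `--stats`/`--data`/`--stop-level` options defined above.

```python
import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pootle.settings")

import django
from django.core.management import call_command

django.setup()

# Stats mode: one "<cache key> total,translated,fuzzy,..." line per path.
call_command("dump", stats=True, stop_level=1)

# Data mode: one "<id>:<Model>\tfield=value..." line per dumped object.
call_command("dump", data=True, stop_level=-1)

# After the patch, omitting both --stats and --data raises CommandError.
```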
gh_patches_debug_12538 | rasdani/github-patches | git_diff | kivy__python-for-android-3027 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix sqlalchemy recipe
Fix sqlalchemy build errors and bump sqlalchemy from 1.3.3 to 2.0+.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythonforandroid/recipes/sqlalchemy/__init__.py`
Content:
```
1 from pythonforandroid.recipe import CompiledComponentsPythonRecipe
2
3
4 class SQLAlchemyRecipe(CompiledComponentsPythonRecipe):
5 name = 'sqlalchemy'
6 version = '1.3.3'
7 url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'
8 call_hostpython_via_targetpython = False
9
10 depends = ['setuptools']
11
12 patches = ['zipsafe.patch']
13
14
15 recipe = SQLAlchemyRecipe()
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pythonforandroid/recipes/sqlalchemy/__init__.py b/pythonforandroid/recipes/sqlalchemy/__init__.py
--- a/pythonforandroid/recipes/sqlalchemy/__init__.py
+++ b/pythonforandroid/recipes/sqlalchemy/__init__.py
@@ -1,15 +1,15 @@
-from pythonforandroid.recipe import CompiledComponentsPythonRecipe
+from pythonforandroid.recipe import PyProjectRecipe
-class SQLAlchemyRecipe(CompiledComponentsPythonRecipe):
+class SQLAlchemyRecipe(PyProjectRecipe):
name = 'sqlalchemy'
- version = '1.3.3'
- url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'
- call_hostpython_via_targetpython = False
-
+ version = '2.0.30'
+ url = 'https://github.com/sqlalchemy/sqlalchemy/archive/refs/tags/rel_{}.tar.gz'
depends = ['setuptools']
- patches = ['zipsafe.patch']
+ @property
+ def versioned_url(self):
+ return self.url.format(self.version.replace(".", "_"))
recipe = SQLAlchemyRecipe()
| {"golden_diff": "diff --git a/pythonforandroid/recipes/sqlalchemy/__init__.py b/pythonforandroid/recipes/sqlalchemy/__init__.py\n--- a/pythonforandroid/recipes/sqlalchemy/__init__.py\n+++ b/pythonforandroid/recipes/sqlalchemy/__init__.py\n@@ -1,15 +1,15 @@\n-from pythonforandroid.recipe import CompiledComponentsPythonRecipe\n+from pythonforandroid.recipe import PyProjectRecipe\n \n \n-class SQLAlchemyRecipe(CompiledComponentsPythonRecipe):\n+class SQLAlchemyRecipe(PyProjectRecipe):\n name = 'sqlalchemy'\n- version = '1.3.3'\n- url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'\n- call_hostpython_via_targetpython = False\n-\n+ version = '2.0.30'\n+ url = 'https://github.com/sqlalchemy/sqlalchemy/archive/refs/tags/rel_{}.tar.gz'\n depends = ['setuptools']\n \n- patches = ['zipsafe.patch']\n+ @property\n+ def versioned_url(self):\n+ return self.url.format(self.version.replace(\".\", \"_\"))\n \n \n recipe = SQLAlchemyRecipe()\n", "issue": "Fix sqlalchemy recipe\nFix sqlalchemy build errors and bump sqlalchemy from 1.3.3 to 2.0+.\r\n\n", "before_files": [{"content": "from pythonforandroid.recipe import CompiledComponentsPythonRecipe\n\n\nclass SQLAlchemyRecipe(CompiledComponentsPythonRecipe):\n name = 'sqlalchemy'\n version = '1.3.3'\n url = 'https://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-{version}.tar.gz'\n call_hostpython_via_targetpython = False\n\n depends = ['setuptools']\n\n patches = ['zipsafe.patch']\n\n\nrecipe = SQLAlchemyRecipe()\n", "path": "pythonforandroid/recipes/sqlalchemy/__init__.py"}], "after_files": [{"content": "from pythonforandroid.recipe import PyProjectRecipe\n\n\nclass SQLAlchemyRecipe(PyProjectRecipe):\n name = 'sqlalchemy'\n version = '2.0.30'\n url = 'https://github.com/sqlalchemy/sqlalchemy/archive/refs/tags/rel_{}.tar.gz'\n depends = ['setuptools']\n\n @property\n def versioned_url(self):\n return self.url.format(self.version.replace(\".\", \"_\"))\n\n\nrecipe = SQLAlchemyRecipe()\n", "path": "pythonforandroid/recipes/sqlalchemy/__init__.py"}]} | 413 | 249 |
gh_patches_debug_5816 | rasdani/github-patches | git_diff | pulp__pulpcore-4684 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
file:// sync deletes files from directory
**Version**
Pulpcore 3.39
**Describe the bug**
When syncing file:// repositories, files are disappearing after the sync.
**To Reproduce**
1) Copy these two repositories to the FS:
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2
2) Sync one, then the other
3) See that some files disappeared.
- In my case, file2 lost every file except PULP_MANIFEST
**Expected behavior**
No files disappear.
**Additional context**
This also occurred with RPM content type files.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/download/file.py`
Content:
```
1 import os
2
3 from urllib.parse import urlparse
4
5 import aiofiles
6
7 from .base import BaseDownloader, DownloadResult
8
9
10 class FileDownloader(BaseDownloader):
11 """
12 A downloader for downloading files from the filesystem.
13
14 It provides digest and size validation along with computation of the digests needed to save the
15 file as an Artifact. It writes a new file to the disk and the return path is included in the
16 :class:`~pulpcore.plugin.download.DownloadResult`.
17
18 This downloader has all of the attributes of
19 :class:`~pulpcore.plugin.download.BaseDownloader`
20 """
21
22 def __init__(self, url, *args, **kwargs):
23 """
24 Download files from a url that starts with `file://`
25
26 Args:
27 url (str): The url to the file. This is expected to begin with `file://`
28 kwargs (dict): This accepts the parameters of
29 :class:`~pulpcore.plugin.download.BaseDownloader`.
30
31 Raises:
32 ValidationError: When the url starts with `file://`, but is not a subfolder of a path in
33 the ALLOWED_IMPORT_PATH setting.
34 """
35 from pulpcore.app.serializers import RemoteSerializer
36
37 RemoteSerializer().validate_url(url)
38 p = urlparse(url)
39 self._path = os.path.abspath(os.path.join(p.netloc, p.path))
40 super().__init__(url, *args, **kwargs)
41
42 async def _run(self, extra_data=None):
43 """
44 Read, validate, and compute digests on the `url`. This is a coroutine.
45
46 This method provides the same return object type and documented in
47 :meth:`~pulpcore.plugin.download.BaseDownloader._run`.
48
49 Args:
50 extra_data (dict): Extra data passed to the downloader.
51 """
52 async with aiofiles.open(self._path, "rb") as f_handle:
53 while True:
54 chunk = await f_handle.read(1048576) # 1 megabyte
55 if not chunk:
56 await self.finalize()
57 break # the reading is done
58 await self.handle_data(chunk)
59 return DownloadResult(
60 path=self._path,
61 artifact_attributes=self.artifact_attributes,
62 url=self.url,
63 headers=None,
64 )
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/download/file.py b/pulpcore/download/file.py
--- a/pulpcore/download/file.py
+++ b/pulpcore/download/file.py
@@ -57,7 +57,7 @@
break # the reading is done
await self.handle_data(chunk)
return DownloadResult(
- path=self._path,
+ path=self.path,
artifact_attributes=self.artifact_attributes,
url=self.url,
headers=None,
| {"golden_diff": "diff --git a/pulpcore/download/file.py b/pulpcore/download/file.py\n--- a/pulpcore/download/file.py\n+++ b/pulpcore/download/file.py\n@@ -57,7 +57,7 @@\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n- path=self._path,\n+ path=self.path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n", "issue": "file:// sync deletes files from directory\n**Version**\r\nPulpcore 3.39\r\n\r\n**Describe the bug**\r\nWhen syncing file:// repositories, files are disappearing after the sync.\r\n\r\n**To Reproduce**\r\n1) Copy these two repositories to the FS:\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2\r\n2) Sync one, then the other\r\n3) See that some files disappeared.\r\n - In my case, file2 lost every file except PULP_MANIFEST\r\n\r\n\r\n**Expected behavior**\r\nNo files disappear.\r\n\r\n**Additional context**\r\nThis also occurred with RPM content type files.\r\n\n", "before_files": [{"content": "import os\n\nfrom urllib.parse import urlparse\n\nimport aiofiles\n\nfrom .base import BaseDownloader, DownloadResult\n\n\nclass FileDownloader(BaseDownloader):\n \"\"\"\n A downloader for downloading files from the filesystem.\n\n It provides digest and size validation along with computation of the digests needed to save the\n file as an Artifact. It writes a new file to the disk and the return path is included in the\n :class:`~pulpcore.plugin.download.DownloadResult`.\n\n This downloader has all of the attributes of\n :class:`~pulpcore.plugin.download.BaseDownloader`\n \"\"\"\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"\n Download files from a url that starts with `file://`\n\n Args:\n url (str): The url to the file. This is expected to begin with `file://`\n kwargs (dict): This accepts the parameters of\n :class:`~pulpcore.plugin.download.BaseDownloader`.\n\n Raises:\n ValidationError: When the url starts with `file://`, but is not a subfolder of a path in\n the ALLOWED_IMPORT_PATH setting.\n \"\"\"\n from pulpcore.app.serializers import RemoteSerializer\n\n RemoteSerializer().validate_url(url)\n p = urlparse(url)\n self._path = os.path.abspath(os.path.join(p.netloc, p.path))\n super().__init__(url, *args, **kwargs)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Read, validate, and compute digests on the `url`. This is a coroutine.\n\n This method provides the same return object type and documented in\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n \"\"\"\n async with aiofiles.open(self._path, \"rb\") as f_handle:\n while True:\n chunk = await f_handle.read(1048576) # 1 megabyte\n if not chunk:\n await self.finalize()\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n path=self._path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n )\n", "path": "pulpcore/download/file.py"}], "after_files": [{"content": "import os\n\nfrom urllib.parse import urlparse\n\nimport aiofiles\n\nfrom .base import BaseDownloader, DownloadResult\n\n\nclass FileDownloader(BaseDownloader):\n \"\"\"\n A downloader for downloading files from the filesystem.\n\n It provides digest and size validation along with computation of the digests needed to save the\n file as an Artifact. 
It writes a new file to the disk and the return path is included in the\n :class:`~pulpcore.plugin.download.DownloadResult`.\n\n This downloader has all of the attributes of\n :class:`~pulpcore.plugin.download.BaseDownloader`\n \"\"\"\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"\n Download files from a url that starts with `file://`\n\n Args:\n url (str): The url to the file. This is expected to begin with `file://`\n kwargs (dict): This accepts the parameters of\n :class:`~pulpcore.plugin.download.BaseDownloader`.\n\n Raises:\n ValidationError: When the url starts with `file://`, but is not a subfolder of a path in\n the ALLOWED_IMPORT_PATH setting.\n \"\"\"\n from pulpcore.app.serializers import RemoteSerializer\n\n RemoteSerializer().validate_url(url)\n p = urlparse(url)\n self._path = os.path.abspath(os.path.join(p.netloc, p.path))\n super().__init__(url, *args, **kwargs)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Read, validate, and compute digests on the `url`. This is a coroutine.\n\n This method provides the same return object type and documented in\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n \"\"\"\n async with aiofiles.open(self._path, \"rb\") as f_handle:\n while True:\n chunk = await f_handle.read(1048576) # 1 megabyte\n if not chunk:\n await self.finalize()\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n path=self.path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n )\n", "path": "pulpcore/download/file.py"}]} | 1,016 | 99 |
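The pulpcore fix above matters because the downloader reads from the `file://` source path (`self._path`) but must report the path of the copy it wrote (`self.path`); reporting the source path lets later cleanup delete files out of the original repository. A toy sketch of that distinction, outside pulpcore (the attribute roles are assumptions based on the diff, not the real `BaseDownloader` API):

```python
# Sketch: a local-file "download" that copies the source and reports the copy.
import shutil
import tempfile

def download_local_file(source_path: str) -> dict:
    # tmp.name plays the role of the downloader's public `path` attribute;
    # source_path plays the role of the private `_path` it reads from.
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        writer_path = tmp.name
    shutil.copyfile(source_path, writer_path)
    # Report the copy, never the source: cleanup of downloaded files must not
    # touch the original file:// repository.
    return {"path": writer_path, "url": "file://" + source_path}

if __name__ == "__main__":
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as src:
        src.write("PULP_MANIFEST\n")
    print(download_local_file(src.name))
```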
gh_patches_debug_3254 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-43 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django: CursorWrapper expects a sequence or None for params
As seen in this partial traceback:
```
File "python3.6/site-packages/scout_apm/django/instruments/sql.py", line 29, in execute
return self.cursor.execute(sql, params)
File "python3.6/site-packages/django/db/backends/utils.py", line 80, in execute
return super(CursorDebugWrapper, self).execute(sql, params)
File "python3.6/site-packages/django/db/backends/utils.py", line 65, in execute
return self.cursor.execute(sql, params)
IndexError: tuple index out of range
```
Currently making a PR to hopefully fix =)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/django/instruments/sql.py`
Content:
```
1 from __future__ import absolute_import
2 import logging
3
4 from scout_apm.core.monkey import monkeypatch_method
5 from scout_apm.core.tracked_request import TrackedRequest
6
7 try:
8 from django.db.backends.base.base import BaseDatabaseWrapper
9 except ImportError:
10 # Backwards compatibility for Django <1.8
11 from django.db.backends import BaseDatabaseWrapper
12
13 try:
14 from django.db.backends.utils import CursorWrapper
15 except ImportError:
16 # Backwards compatibility for Django <1.9
17 from django.db.backends.util import CursorWrapper
18
19 logger = logging.getLogger(__name__)
20
21
22 class _DetailedTracingCursorWrapper(CursorWrapper):
23 def execute(self, sql, params=()):
24 tr = TrackedRequest.instance()
25 span = tr.start_span(operation='SQL/Query')
26 span.tag('db.statement', sql)
27
28 try:
29 return self.cursor.execute(sql, params)
30 finally:
31 tr.stop_span()
32
33 def executemany(self, sql, param_list):
34 span = TrackedRequest.instance().start_span(operation='SQL/Many')
35 span.tag('db.statement', sql)
36
37 try:
38 return self.cursor.executemany(sql, param_list)
39 finally:
40 TrackedRequest.instance().stop_span()
41
42
43 # pylint: disable=too-few-public-methods
44 class SQLInstrument:
45
46 # The linter thinks the methods we monkeypatch are not used
47 # pylint: disable=W0612
48 # pylint: disable=no-method-argument
49 @staticmethod
50 def install():
51 """
52 Installs ScoutApm SQL Instrumentation by monkeypatching the `cursor`
53 method of BaseDatabaseWrapper, to return a wrapper that instruments any
54 calls going through it.
55 """
56 @monkeypatch_method(BaseDatabaseWrapper)
57 def cursor(original, self, *args, **kwargs):
58 result = original(*args, **kwargs)
59 return _DetailedTracingCursorWrapper(result, self)
60
61 logger.debug('Monkey patched SQL')
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/django/instruments/sql.py b/src/scout_apm/django/instruments/sql.py
--- a/src/scout_apm/django/instruments/sql.py
+++ b/src/scout_apm/django/instruments/sql.py
@@ -20,7 +20,7 @@
class _DetailedTracingCursorWrapper(CursorWrapper):
- def execute(self, sql, params=()):
+ def execute(self, sql, params=None):
tr = TrackedRequest.instance()
span = tr.start_span(operation='SQL/Query')
span.tag('db.statement', sql)
| {"golden_diff": "diff --git a/src/scout_apm/django/instruments/sql.py b/src/scout_apm/django/instruments/sql.py\n--- a/src/scout_apm/django/instruments/sql.py\n+++ b/src/scout_apm/django/instruments/sql.py\n@@ -20,7 +20,7 @@\n \n \n class _DetailedTracingCursorWrapper(CursorWrapper):\n- def execute(self, sql, params=()):\n+ def execute(self, sql, params=None):\n tr = TrackedRequest.instance()\n span = tr.start_span(operation='SQL/Query')\n span.tag('db.statement', sql)\n", "issue": "Django: CursorWrapper expects a sequence or None for params \nAs seen in this partial traceback:\r\n```\r\n File \"python3.6/site-packages/scout_apm/django/instruments/sql.py\", line 29, in execute\r\n return self.cursor.execute(sql, params)\r\n File \"python3.6/site-packages/django/db/backends/utils.py\", line 80, in execute\r\n return super(CursorDebugWrapper, self).execute(sql, params)\r\n File \"python3.6/site-packages/django/db/backends/utils.py\", line 65, in execute\r\n return self.cursor.execute(sql, params)\r\nIndexError: tuple index out of range\r\n```\r\nCurrently making a PR to hopefully fix =)\n", "before_files": [{"content": "from __future__ import absolute_import\nimport logging\n\nfrom scout_apm.core.monkey import monkeypatch_method\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from django.db.backends.base.base import BaseDatabaseWrapper\nexcept ImportError:\n # Backwards compatibility for Django <1.8\n from django.db.backends import BaseDatabaseWrapper\n\ntry:\n from django.db.backends.utils import CursorWrapper\nexcept ImportError:\n # Backwards compatibility for Django <1.9\n from django.db.backends.util import CursorWrapper\n\nlogger = logging.getLogger(__name__)\n\n\nclass _DetailedTracingCursorWrapper(CursorWrapper):\n def execute(self, sql, params=()):\n tr = TrackedRequest.instance()\n span = tr.start_span(operation='SQL/Query')\n span.tag('db.statement', sql)\n\n try:\n return self.cursor.execute(sql, params)\n finally:\n tr.stop_span()\n\n def executemany(self, sql, param_list):\n span = TrackedRequest.instance().start_span(operation='SQL/Many')\n span.tag('db.statement', sql)\n\n try:\n return self.cursor.executemany(sql, param_list)\n finally:\n TrackedRequest.instance().stop_span()\n\n\n# pylint: disable=too-few-public-methods\nclass SQLInstrument:\n\n # The linter thinks the methods we monkeypatch are not used\n # pylint: disable=W0612\n # pylint: disable=no-method-argument\n @staticmethod\n def install():\n \"\"\"\n Installs ScoutApm SQL Instrumentation by monkeypatching the `cursor`\n method of BaseDatabaseWrapper, to return a wrapper that instruments any\n calls going through it.\n \"\"\"\n @monkeypatch_method(BaseDatabaseWrapper)\n def cursor(original, self, *args, **kwargs):\n result = original(*args, **kwargs)\n return _DetailedTracingCursorWrapper(result, self)\n\n logger.debug('Monkey patched SQL')\n", "path": "src/scout_apm/django/instruments/sql.py"}], "after_files": [{"content": "from __future__ import absolute_import\nimport logging\n\nfrom scout_apm.core.monkey import monkeypatch_method\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from django.db.backends.base.base import BaseDatabaseWrapper\nexcept ImportError:\n # Backwards compatibility for Django <1.8\n from django.db.backends import BaseDatabaseWrapper\n\ntry:\n from django.db.backends.utils import CursorWrapper\nexcept ImportError:\n # Backwards compatibility for Django <1.9\n from django.db.backends.util import CursorWrapper\n\nlogger = 
logging.getLogger(__name__)\n\n\nclass _DetailedTracingCursorWrapper(CursorWrapper):\n def execute(self, sql, params=None):\n tr = TrackedRequest.instance()\n span = tr.start_span(operation='SQL/Query')\n span.tag('db.statement', sql)\n\n try:\n return self.cursor.execute(sql, params)\n finally:\n tr.stop_span()\n\n def executemany(self, sql, param_list):\n span = TrackedRequest.instance().start_span(operation='SQL/Many')\n span.tag('db.statement', sql)\n\n try:\n return self.cursor.executemany(sql, param_list)\n finally:\n TrackedRequest.instance().stop_span()\n\n\n# pylint: disable=too-few-public-methods\nclass SQLInstrument:\n\n # The linter thinks the methods we monkeypatch are not used\n # pylint: disable=W0612\n # pylint: disable=no-method-argument\n @staticmethod\n def install():\n \"\"\"\n Installs ScoutApm SQL Instrumentation by monkeypatching the `cursor`\n method of BaseDatabaseWrapper, to return a wrapper that instruments any\n calls going through it.\n \"\"\"\n @monkeypatch_method(BaseDatabaseWrapper)\n def cursor(original, self, *args, **kwargs):\n result = original(*args, **kwargs)\n return _DetailedTracingCursorWrapper(result, self)\n\n logger.debug('Monkey patched SQL')\n", "path": "src/scout_apm/django/instruments/sql.py"}]} | 956 | 132 |
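The scout_apm change above is small but consequential: many DB-API drivers only attempt `%`-style parameter interpolation when `params` is not `None`, so defaulting to an empty tuple can blow up on SQL that contains a literal `%`. A toy illustration of why `()` and `None` are not interchangeable (driver behaviour varies; this stand-in function only mimics the interpolation step):

```python
# Sketch: why execute(sql, params=()) differs from execute(sql, params=None).
sql = "SELECT '100%' AS pct"

def driver_execute(sql, params=None):
    # Stand-in for a DB-API driver: interpolation only happens when params
    # is not None, so a stray '%' in the SQL is otherwise left alone.
    if params is not None:
        sql = sql % params
    return sql

print(driver_execute(sql))  # OK: interpolation skipped entirely
try:
    driver_execute(sql, ())  # interpolation attempted -> formatting error
except (ValueError, TypeError, IndexError) as exc:
    print("empty tuple triggered:", exc)
```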
gh_patches_debug_28327 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9482 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Concatenate/consolidate all algorithms with different implementations
### Feature description
There are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backtracking/minmax.py`
Content:
```
1 """
2 Minimax helps to achieve maximum score in a game by checking all possible moves.
3
4 """
5 from __future__ import annotations
6
7 import math
8
9
10 def minimax(
11 depth: int, node_index: int, is_max: bool, scores: list[int], height: float
12 ) -> int:
13 """
14 depth is current depth in game tree.
15 node_index is index of current node in scores[].
16 scores[] contains the leaves of game tree.
17 height is maximum height of game tree.
18
19 >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
20 >>> height = math.log(len(scores), 2)
21 >>> minimax(0, 0, True, scores, height)
22 65
23 >>> minimax(-1, 0, True, scores, height)
24 Traceback (most recent call last):
25 ...
26 ValueError: Depth cannot be less than 0
27 >>> minimax(0, 0, True, [], 2)
28 Traceback (most recent call last):
29 ...
30 ValueError: Scores cannot be empty
31 >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
32 >>> height = math.log(len(scores), 2)
33 >>> minimax(0, 0, True, scores, height)
34 12
35 """
36
37 if depth < 0:
38 raise ValueError("Depth cannot be less than 0")
39
40 if not scores:
41 raise ValueError("Scores cannot be empty")
42
43 if depth == height:
44 return scores[node_index]
45
46 return (
47 max(
48 minimax(depth + 1, node_index * 2, False, scores, height),
49 minimax(depth + 1, node_index * 2 + 1, False, scores, height),
50 )
51 if is_max
52 else min(
53 minimax(depth + 1, node_index * 2, True, scores, height),
54 minimax(depth + 1, node_index * 2 + 1, True, scores, height),
55 )
56 )
57
58
59 def main() -> None:
60 scores = [90, 23, 6, 33, 21, 65, 123, 34423]
61 height = math.log(len(scores), 2)
62 print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
63
64
65 if __name__ == "__main__":
66 import doctest
67
68 doctest.testmod()
69 main()
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backtracking/minmax.py b/backtracking/minmax.py
deleted file mode 100644
--- a/backtracking/minmax.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""
-Minimax helps to achieve maximum score in a game by checking all possible moves.
-
-"""
-from __future__ import annotations
-
-import math
-
-
-def minimax(
- depth: int, node_index: int, is_max: bool, scores: list[int], height: float
-) -> int:
- """
- depth is current depth in game tree.
- node_index is index of current node in scores[].
- scores[] contains the leaves of game tree.
- height is maximum height of game tree.
-
- >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
- >>> height = math.log(len(scores), 2)
- >>> minimax(0, 0, True, scores, height)
- 65
- >>> minimax(-1, 0, True, scores, height)
- Traceback (most recent call last):
- ...
- ValueError: Depth cannot be less than 0
- >>> minimax(0, 0, True, [], 2)
- Traceback (most recent call last):
- ...
- ValueError: Scores cannot be empty
- >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
- >>> height = math.log(len(scores), 2)
- >>> minimax(0, 0, True, scores, height)
- 12
- """
-
- if depth < 0:
- raise ValueError("Depth cannot be less than 0")
-
- if not scores:
- raise ValueError("Scores cannot be empty")
-
- if depth == height:
- return scores[node_index]
-
- return (
- max(
- minimax(depth + 1, node_index * 2, False, scores, height),
- minimax(depth + 1, node_index * 2 + 1, False, scores, height),
- )
- if is_max
- else min(
- minimax(depth + 1, node_index * 2, True, scores, height),
- minimax(depth + 1, node_index * 2 + 1, True, scores, height),
- )
- )
-
-
-def main() -> None:
- scores = [90, 23, 6, 33, 21, 65, 123, 34423]
- height = math.log(len(scores), 2)
- print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
-
-
-if __name__ == "__main__":
- import doctest
-
- doctest.testmod()
- main()
| {"golden_diff": "diff --git a/backtracking/minmax.py b/backtracking/minmax.py\ndeleted file mode 100644\n--- a/backtracking/minmax.py\n+++ /dev/null\n@@ -1,69 +0,0 @@\n-\"\"\"\n-Minimax helps to achieve maximum score in a game by checking all possible moves.\n-\n-\"\"\"\n-from __future__ import annotations\n-\n-import math\n-\n-\n-def minimax(\n- depth: int, node_index: int, is_max: bool, scores: list[int], height: float\n-) -> int:\n- \"\"\"\n- depth is current depth in game tree.\n- node_index is index of current node in scores[].\n- scores[] contains the leaves of game tree.\n- height is maximum height of game tree.\n-\n- >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n- >>> height = math.log(len(scores), 2)\n- >>> minimax(0, 0, True, scores, height)\n- 65\n- >>> minimax(-1, 0, True, scores, height)\n- Traceback (most recent call last):\n- ...\n- ValueError: Depth cannot be less than 0\n- >>> minimax(0, 0, True, [], 2)\n- Traceback (most recent call last):\n- ...\n- ValueError: Scores cannot be empty\n- >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]\n- >>> height = math.log(len(scores), 2)\n- >>> minimax(0, 0, True, scores, height)\n- 12\n- \"\"\"\n-\n- if depth < 0:\n- raise ValueError(\"Depth cannot be less than 0\")\n-\n- if not scores:\n- raise ValueError(\"Scores cannot be empty\")\n-\n- if depth == height:\n- return scores[node_index]\n-\n- return (\n- max(\n- minimax(depth + 1, node_index * 2, False, scores, height),\n- minimax(depth + 1, node_index * 2 + 1, False, scores, height),\n- )\n- if is_max\n- else min(\n- minimax(depth + 1, node_index * 2, True, scores, height),\n- minimax(depth + 1, node_index * 2 + 1, True, scores, height),\n- )\n- )\n-\n-\n-def main() -> None:\n- scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n- height = math.log(len(scores), 2)\n- print(f\"Optimal value : {minimax(0, 0, True, scores, height)}\")\n-\n-\n-if __name__ == \"__main__\":\n- import doctest\n-\n- doctest.testmod()\n- main()\n", "issue": "Concatenate/consolidate all algorithms with different implementations\n### Feature description\n\nThere are lots of algorithms with the same concept but different implementations/methods in different files. 
All these should be moved into one file\n", "before_files": [{"content": "\"\"\"\nMinimax helps to achieve maximum score in a game by checking all possible moves.\n\n\"\"\"\nfrom __future__ import annotations\n\nimport math\n\n\ndef minimax(\n depth: int, node_index: int, is_max: bool, scores: list[int], height: float\n) -> int:\n \"\"\"\n depth is current depth in game tree.\n node_index is index of current node in scores[].\n scores[] contains the leaves of game tree.\n height is maximum height of game tree.\n\n >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n >>> height = math.log(len(scores), 2)\n >>> minimax(0, 0, True, scores, height)\n 65\n >>> minimax(-1, 0, True, scores, height)\n Traceback (most recent call last):\n ...\n ValueError: Depth cannot be less than 0\n >>> minimax(0, 0, True, [], 2)\n Traceback (most recent call last):\n ...\n ValueError: Scores cannot be empty\n >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]\n >>> height = math.log(len(scores), 2)\n >>> minimax(0, 0, True, scores, height)\n 12\n \"\"\"\n\n if depth < 0:\n raise ValueError(\"Depth cannot be less than 0\")\n\n if not scores:\n raise ValueError(\"Scores cannot be empty\")\n\n if depth == height:\n return scores[node_index]\n\n return (\n max(\n minimax(depth + 1, node_index * 2, False, scores, height),\n minimax(depth + 1, node_index * 2 + 1, False, scores, height),\n )\n if is_max\n else min(\n minimax(depth + 1, node_index * 2, True, scores, height),\n minimax(depth + 1, node_index * 2 + 1, True, scores, height),\n )\n )\n\n\ndef main() -> None:\n scores = [90, 23, 6, 33, 21, 65, 123, 34423]\n height = math.log(len(scores), 2)\n print(f\"Optimal value : {minimax(0, 0, True, scores, height)}\")\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n main()\n", "path": "backtracking/minmax.py"}], "after_files": [{"content": null, "path": "backtracking/minmax.py"}]} | 1,013 | 680 |
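The consolidation patch above simply removes the redundant `backtracking/minmax.py`. When looking for other same-concept duplicates, one rough starting point is to group modules whose filenames normalize to the same key (this scanner is an assumption about how one might search, not part of the repository's tooling, and it will miss spelling variants such as "minmax" vs "minimax"):

```python
# Rough duplicate-candidate finder: group .py files by normalized stem.
from collections import defaultdict
from pathlib import Path

def find_candidates(root="."):
    groups = defaultdict(list)
    for path in Path(root).rglob("*.py"):
        key = path.stem.lower().replace("_", "")  # "min_max" -> "minmax"
        groups[key].append(str(path))
    return {k: v for k, v in groups.items() if len(v) > 1}

if __name__ == "__main__":
    for key, paths in sorted(find_candidates().items()):
        print(key, "->", paths)
```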
gh_patches_debug_25689 | rasdani/github-patches | git_diff | searx__searx-542 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
reddit: cannot view image
- search: !re wallpaper dark blue
- select some image
- click "View Image" button
Then the image should be shown but the searx start page is loaded. "View Page" shows the image. This is because image==page but this is an inconsistency (sort of)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/reddit.py`
Content:
```
1 """
2 Reddit
3
4 @website https://www.reddit.com/
5 @provide-api yes (https://www.reddit.com/dev/api)
6
7 @using-api yes
8 @results JSON
9 @stable yes
10 @parse url, title, content, thumbnail, publishedDate
11 """
12
13 import json
14 from cgi import escape
15 from urllib import urlencode
16 from urlparse import urlparse
17 from datetime import datetime
18
19 # engine dependent config
20 categories = ['general', 'images', 'news', 'social media']
21 page_size = 25
22
23 # search-url
24 search_url = 'https://www.reddit.com/search.json?{query}'
25
26
27 # do search-request
28 def request(query, params):
29 query = urlencode({'q': query,
30 'limit': page_size})
31 params['url'] = search_url.format(query=query)
32
33 return params
34
35
36 # get response from search-request
37 def response(resp):
38 img_results = []
39 text_results = []
40
41 search_results = json.loads(resp.text)
42
43 # return empty array if there are no results
44 if 'data' not in search_results:
45 return []
46
47 posts = search_results.get('data', {}).get('children', [])
48
49 # process results
50 for post in posts:
51 data = post['data']
52
53 # extract post information
54 params = {
55 'url': data['url'],
56 'title': data['title']
57 }
58
59 # if thumbnail field contains a valid URL, we need to change template
60 thumbnail = data['thumbnail']
61 url_info = urlparse(thumbnail)
62 # netloc & path
63 if url_info[1] != '' and url_info[2] != '':
64 params['thumbnail_src'] = thumbnail
65 params['template'] = 'images.html'
66 img_results.append(params)
67 else:
68 created = datetime.fromtimestamp(data['created_utc'])
69 content = escape(data['selftext'])
70 if len(content) > 500:
71 content = content[:500] + '...'
72 params['content'] = content
73 params['publishedDate'] = created
74 text_results.append(params)
75
76 # show images first and text results second
77 return img_results + text_results
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py
--- a/searx/engines/reddit.py
+++ b/searx/engines/reddit.py
@@ -13,7 +13,7 @@
import json
from cgi import escape
from urllib import urlencode
-from urlparse import urlparse
+from urlparse import urlparse, urljoin
from datetime import datetime
# engine dependent config
@@ -21,7 +21,8 @@
page_size = 25
# search-url
-search_url = 'https://www.reddit.com/search.json?{query}'
+base_url = 'https://www.reddit.com/'
+search_url = base_url + 'search.json?{query}'
# do search-request
@@ -52,7 +53,7 @@
# extract post information
params = {
- 'url': data['url'],
+ 'url': urljoin(base_url, data['permalink']),
'title': data['title']
}
@@ -61,6 +62,7 @@
url_info = urlparse(thumbnail)
# netloc & path
if url_info[1] != '' and url_info[2] != '':
+ params['img_src'] = data['url']
params['thumbnail_src'] = thumbnail
params['template'] = 'images.html'
img_results.append(params)
| {"golden_diff": "diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py\n--- a/searx/engines/reddit.py\n+++ b/searx/engines/reddit.py\n@@ -13,7 +13,7 @@\n import json\n from cgi import escape\n from urllib import urlencode\n-from urlparse import urlparse\n+from urlparse import urlparse, urljoin\n from datetime import datetime\n \n # engine dependent config\n@@ -21,7 +21,8 @@\n page_size = 25\n \n # search-url\n-search_url = 'https://www.reddit.com/search.json?{query}'\n+base_url = 'https://www.reddit.com/'\n+search_url = base_url + 'search.json?{query}'\n \n \n # do search-request\n@@ -52,7 +53,7 @@\n \n # extract post information\n params = {\n- 'url': data['url'],\n+ 'url': urljoin(base_url, data['permalink']),\n 'title': data['title']\n }\n \n@@ -61,6 +62,7 @@\n url_info = urlparse(thumbnail)\n # netloc & path\n if url_info[1] != '' and url_info[2] != '':\n+ params['img_src'] = data['url']\n params['thumbnail_src'] = thumbnail\n params['template'] = 'images.html'\n img_results.append(params)\n", "issue": "reddit: cannot view image\n- search: !re wallpaper dark blue\n- select some image\n- click \"View Image\" button\n\nThen the image should be shown but the searx start page is loaded. \"View Page\" shows the image. This is because image==page but this is an inconsistency (sort of)\n\n", "before_files": [{"content": "\"\"\"\n Reddit\n\n @website https://www.reddit.com/\n @provide-api yes (https://www.reddit.com/dev/api)\n\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title, content, thumbnail, publishedDate\n\"\"\"\n\nimport json\nfrom cgi import escape\nfrom urllib import urlencode\nfrom urlparse import urlparse\nfrom datetime import datetime\n\n# engine dependent config\ncategories = ['general', 'images', 'news', 'social media']\npage_size = 25\n\n# search-url\nsearch_url = 'https://www.reddit.com/search.json?{query}'\n\n\n# do search-request\ndef request(query, params):\n query = urlencode({'q': query,\n 'limit': page_size})\n params['url'] = search_url.format(query=query)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n img_results = []\n text_results = []\n\n search_results = json.loads(resp.text)\n\n # return empty array if there are no results\n if 'data' not in search_results:\n return []\n\n posts = search_results.get('data', {}).get('children', [])\n\n # process results\n for post in posts:\n data = post['data']\n\n # extract post information\n params = {\n 'url': data['url'],\n 'title': data['title']\n }\n\n # if thumbnail field contains a valid URL, we need to change template\n thumbnail = data['thumbnail']\n url_info = urlparse(thumbnail)\n # netloc & path\n if url_info[1] != '' and url_info[2] != '':\n params['thumbnail_src'] = thumbnail\n params['template'] = 'images.html'\n img_results.append(params)\n else:\n created = datetime.fromtimestamp(data['created_utc'])\n content = escape(data['selftext'])\n if len(content) > 500:\n content = content[:500] + '...'\n params['content'] = content\n params['publishedDate'] = created\n text_results.append(params)\n\n # show images first and text results second\n return img_results + text_results\n", "path": "searx/engines/reddit.py"}], "after_files": [{"content": "\"\"\"\n Reddit\n\n @website https://www.reddit.com/\n @provide-api yes (https://www.reddit.com/dev/api)\n\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title, content, thumbnail, publishedDate\n\"\"\"\n\nimport json\nfrom cgi import escape\nfrom urllib import urlencode\nfrom urlparse import urlparse, 
urljoin\nfrom datetime import datetime\n\n# engine dependent config\ncategories = ['general', 'images', 'news', 'social media']\npage_size = 25\n\n# search-url\nbase_url = 'https://www.reddit.com/'\nsearch_url = base_url + 'search.json?{query}'\n\n\n# do search-request\ndef request(query, params):\n query = urlencode({'q': query,\n 'limit': page_size})\n params['url'] = search_url.format(query=query)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n img_results = []\n text_results = []\n\n search_results = json.loads(resp.text)\n\n # return empty array if there are no results\n if 'data' not in search_results:\n return []\n\n posts = search_results.get('data', {}).get('children', [])\n\n # process results\n for post in posts:\n data = post['data']\n\n # extract post information\n params = {\n 'url': urljoin(base_url, data['permalink']),\n 'title': data['title']\n }\n\n # if thumbnail field contains a valid URL, we need to change template\n thumbnail = data['thumbnail']\n url_info = urlparse(thumbnail)\n # netloc & path\n if url_info[1] != '' and url_info[2] != '':\n params['img_src'] = data['url']\n params['thumbnail_src'] = thumbnail\n params['template'] = 'images.html'\n img_results.append(params)\n else:\n created = datetime.fromtimestamp(data['created_utc'])\n content = escape(data['selftext'])\n if len(content) > 500:\n content = content[:500] + '...'\n params['content'] = content\n params['publishedDate'] = created\n text_results.append(params)\n\n # show images first and text results second\n return img_results + text_results\n", "path": "searx/engines/reddit.py"}]} | 952 | 304 |
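The searx patch above resolves the inconsistency by pointing the result `url` at the reddit permalink (the discussion page) and exposing the linked media separately as `img_src`. A small sketch of that field mapping for one post, using made-up values in the shape of `data.children[*].data` from the reddit search API:

```python
# Sketch: mapping one reddit API post onto a searx image result,
# following the url/img_src/thumbnail_src split introduced by the patch.
from urllib.parse import urljoin

BASE_URL = "https://www.reddit.com/"

post = {  # illustrative values only
    "permalink": "/r/wallpaper/comments/abc123/dark_blue/",
    "url": "https://i.redd.it/example.jpg",  # the linked media itself
    "title": "dark blue",
    "thumbnail": "https://b.thumbs.redditmedia.com/example.jpg",
}

result = {
    "url": urljoin(BASE_URL, post["permalink"]),  # "View Page" -> discussion
    "img_src": post["url"],                       # "View Image" -> the image
    "thumbnail_src": post["thumbnail"],
    "title": post["title"],
    "template": "images.html",
}
print(result["url"])
```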
gh_patches_debug_20819 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2062 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect insights.components.rhel_version module doc
The [example](https://github.com/RedHatInsights/insights-core/blob/master/insights/components/rhel_version.py#L17) in the insights.components.rhel_version pydoc doesn't work. The objects don't have a `rhel_version` attribute.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/components/rhel_version.py`
Content:
```
1 """
2 IsRhel6, IsRhel7 and IsRhel8
3 ===============================
4
5 The ``IsRhel*`` components each use the ``RedhatRelease`` combiner to
6 retrieve the RHEL version information.
7 Each component checks if the release version matches the version it represents,
8 if the version does not match what is expected the class raises ``SkipComponent``
9 so that the dependent component will not fire.
10 Can be added as a dependency of a parser so that the parser only fires if the
11 ``IsRhel*`` dependency is met.
12
13 An example from the following ``/etc/redhat_release`` file output::
14
15 Red Hat Enterprise Linux release 8.0 (Ootpa)
16
17 Example:
18
19 >>> type(IsRhel8)
20 <class 'insights.components.rhel_version.Is_Rhel8'>
21 >>> is_rhel8.rhel_version
22 '8.0'
23 """
24
25 from insights.core.plugins import component
26 from insights.combiners.redhat_release import RedHatRelease
27 from insights.core.dr import SkipComponent
28
29
30 @component(RedHatRelease)
31 class IsRhel6(object):
32 """
33 This component uses ``RedHatRelease`` combiner
34 to determine RHEL version. It checks if RHEL6, if not
35 RHEL6 it raises ``SkipComponent``.
36
37 Raises:
38 SkipComponent: When RHEL version is not RHEL6.
39 """
40 def __init__(self, rhel):
41 if rhel.major != 6:
42 raise SkipComponent('Not RHEL6')
43
44
45 @component(RedHatRelease)
46 class IsRhel7(object):
47 """
48 This component uses ``RedHatRelease`` combiner
49 to determine RHEL version. It checks if RHEL7, if not \
50 RHEL7 it raises ``SkipComponent``.
51
52 Raises:
53 SkipComponent: When RHEL version is not RHEL7.
54 """
55 def __init__(self, rhel):
56 if rhel.major != 7:
57 raise SkipComponent('Not RHEL7')
58
59
60 @component(RedHatRelease)
61 class IsRhel8(object):
62 """
63 This component uses ``RedhatRelease`` combiner
64 to determine RHEL version. It checks if RHEL8, if not
65 RHEL8 it raises ``SkipComponent``.
66
67 Raises:
68 SkipComponent: When RHEL version is not RHEL8.
69 """
70 def __init__(self, rhel):
71 if rhel.major != 8:
72 raise SkipComponent('Not RHEL8')
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/components/rhel_version.py b/insights/components/rhel_version.py
--- a/insights/components/rhel_version.py
+++ b/insights/components/rhel_version.py
@@ -2,24 +2,14 @@
IsRhel6, IsRhel7 and IsRhel8
===============================
-The ``IsRhel*`` components each use the ``RedhatRelease`` combiner to
-retrieve the RHEL version information.
-Each component checks if the release version matches the version it represents,
-if the version does not match what is expected the class raises ``SkipComponent``
-so that the dependent component will not fire.
-Can be added as a dependency of a parser so that the parser only fires if the
-``IsRhel*`` dependency is met.
-
-An example from the following ``/etc/redhat_release`` file output::
-
- Red Hat Enterprise Linux release 8.0 (Ootpa)
-
-Example:
-
- >>> type(IsRhel8)
- <class 'insights.components.rhel_version.Is_Rhel8'>
- >>> is_rhel8.rhel_version
- '8.0'
+An ``IsRhel*`` component is valid if the
+:py:class:`insights.combiners.redhat_release.RedHatRelease` combiner indicates
+the major RHEL version represented by the component. Otherwise, it raises a
+:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from
+executing.
+
+In particular, an ``IsRhel*`` component can be added as a dependency of a
+parser to limit it to a given version.
"""
from insights.core.plugins import component
| {"golden_diff": "diff --git a/insights/components/rhel_version.py b/insights/components/rhel_version.py\n--- a/insights/components/rhel_version.py\n+++ b/insights/components/rhel_version.py\n@@ -2,24 +2,14 @@\n IsRhel6, IsRhel7 and IsRhel8\n ===============================\n \n-The ``IsRhel*`` components each use the ``RedhatRelease`` combiner to\n-retrieve the RHEL version information.\n-Each component checks if the release version matches the version it represents,\n-if the version does not match what is expected the class raises ``SkipComponent``\n-so that the dependent component will not fire.\n-Can be added as a dependency of a parser so that the parser only fires if the\n-``IsRhel*`` dependency is met.\n-\n-An example from the following ``/etc/redhat_release`` file output::\n-\n- Red Hat Enterprise Linux release 8.0 (Ootpa)\n-\n-Example:\n-\n- >>> type(IsRhel8)\n- <class 'insights.components.rhel_version.Is_Rhel8'>\n- >>> is_rhel8.rhel_version\n- '8.0'\n+An ``IsRhel*`` component is valid if the\n+:py:class:`insights.combiners.redhat_release.RedHatRelease` combiner indicates\n+the major RHEL version represented by the component. Otherwise, it raises a\n+:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from\n+executing.\n+\n+In particular, an ``IsRhel*`` component can be added as a dependency of a\n+parser to limit it to a given version.\n \"\"\"\n \n from insights.core.plugins import component\n", "issue": "Incorrect insights.components.rhel_version module doc\nThe [example](https://github.com/RedHatInsights/insights-core/blob/master/insights/components/rhel_version.py#L17) in the insights.components.rhel_version pydoc doesn't work. The objects don't have a `rhel_version` attribute.\n", "before_files": [{"content": "\"\"\"\nIsRhel6, IsRhel7 and IsRhel8\n===============================\n\nThe ``IsRhel*`` components each use the ``RedhatRelease`` combiner to\nretrieve the RHEL version information.\nEach component checks if the release version matches the version it represents,\nif the version does not match what is expected the class raises ``SkipComponent``\nso that the dependent component will not fire.\nCan be added as a dependency of a parser so that the parser only fires if the\n``IsRhel*`` dependency is met.\n\nAn example from the following ``/etc/redhat_release`` file output::\n\n Red Hat Enterprise Linux release 8.0 (Ootpa)\n\nExample:\n\n >>> type(IsRhel8)\n <class 'insights.components.rhel_version.Is_Rhel8'>\n >>> is_rhel8.rhel_version\n '8.0'\n\"\"\"\n\nfrom insights.core.plugins import component\nfrom insights.combiners.redhat_release import RedHatRelease\nfrom insights.core.dr import SkipComponent\n\n\n@component(RedHatRelease)\nclass IsRhel6(object):\n \"\"\"\n This component uses ``RedHatRelease`` combiner\n to determine RHEL version. It checks if RHEL6, if not\n RHEL6 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL6.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 6:\n raise SkipComponent('Not RHEL6')\n\n\n@component(RedHatRelease)\nclass IsRhel7(object):\n \"\"\"\n This component uses ``RedHatRelease`` combiner\n to determine RHEL version. 
It checks if RHEL7, if not \\\n RHEL7 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL7.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 7:\n raise SkipComponent('Not RHEL7')\n\n\n@component(RedHatRelease)\nclass IsRhel8(object):\n \"\"\"\n This component uses ``RedhatRelease`` combiner\n to determine RHEL version. It checks if RHEL8, if not\n RHEL8 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL8.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 8:\n raise SkipComponent('Not RHEL8')\n", "path": "insights/components/rhel_version.py"}], "after_files": [{"content": "\"\"\"\nIsRhel6, IsRhel7 and IsRhel8\n===============================\n\nAn ``IsRhel*`` component is valid if the\n:py:class:`insights.combiners.redhat_release.RedHatRelease` combiner indicates\nthe major RHEL version represented by the component. Otherwise, it raises a\n:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from\nexecuting.\n\nIn particular, an ``IsRhel*`` component can be added as a dependency of a\nparser to limit it to a given version.\n\"\"\"\n\nfrom insights.core.plugins import component\nfrom insights.combiners.redhat_release import RedHatRelease\nfrom insights.core.dr import SkipComponent\n\n\n@component(RedHatRelease)\nclass IsRhel6(object):\n \"\"\"\n This component uses ``RedHatRelease`` combiner\n to determine RHEL version. It checks if RHEL6, if not\n RHEL6 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL6.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 6:\n raise SkipComponent('Not RHEL6')\n\n\n@component(RedHatRelease)\nclass IsRhel7(object):\n \"\"\"\n This component uses ``RedHatRelease`` combiner\n to determine RHEL version. It checks if RHEL7, if not \\\n RHEL7 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL7.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 7:\n raise SkipComponent('Not RHEL7')\n\n\n@component(RedHatRelease)\nclass IsRhel8(object):\n \"\"\"\n This component uses ``RedhatRelease`` combiner\n to determine RHEL version. It checks if RHEL8, if not\n RHEL8 it raises ``SkipComponent``.\n\n Raises:\n SkipComponent: When RHEL version is not RHEL8.\n \"\"\"\n def __init__(self, rhel):\n if rhel.major != 8:\n raise SkipComponent('Not RHEL8')\n", "path": "insights/components/rhel_version.py"}]} | 1,003 | 365 |
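The corrected docstring above describes a gating pattern: an `IsRhel*` component raises `SkipComponent` unless the detected major version matches, which prevents dependent parsers from running. A minimal stand-in for that pattern in plain Python (class and exception names mimic the insights-core ones, but this is not the real framework wiring):

```python
# Sketch of the version-gate pattern used by the IsRhel* components.
class SkipComponent(Exception):
    pass

class IsRhel7:
    def __init__(self, major_version: int):
        if major_version != 7:
            raise SkipComponent("Not RHEL7")

def run_gated(major_version: int) -> str:
    try:
        IsRhel7(major_version)
    except SkipComponent as exc:
        return "dependent parser skipped: %s" % exc
    return "dependent parser runs"

if __name__ == "__main__":
    print(run_gated(7))  # dependent parser runs
    print(run_gated(8))  # dependent parser skipped: Not RHEL7
```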
gh_patches_debug_20533 | rasdani/github-patches | git_diff | nilearn__nilearn-2264 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Strange comment in ICA example
This is the comment:
# XXX: must get the code to run for more than 1 subject
And this is the link to the example:
https://nilearn.github.io/auto_examples/05_advanced/plot_ica_resting_state.html
The comment appears to relate to an internal todo rather than comment/instruction to users.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/05_advanced/plot_ica_resting_state.py`
Content:
```
1 """
2 Multivariate decompositions: Independent component analysis of fMRI
3 ===================================================================
4
5
6 This example is meant to demonstrate nilearn as a low-level tools used to
7 combine feature extraction with a multivariate decomposition algorithm
8 for movie-watching.
9
10 This example is a toy. To apply ICA to fmri timeseries data, it is advised
11 to look at the example
12 :ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_decomposition.py`.
13
14 The example here applies the scikit-learn ICA to movie watching timeseries data.
15 Note that following the code in the example, any unsupervised
16 decomposition model, or other latent-factor models, can be applied to
17 the data, as the scikit-learn API enables to exchange them as almost
18 black box (though the relevant parameter for brain maps might no longer
19 be given by a call to fit_transform).
20
21 """
22
23 #####################################################################
24 # Load movie watching dataset
25 from nilearn import datasets
26 # Here we use only single subject to get faster-running code. For better
27 # results, simply increase this number
28 # XXX: must get the code to run for more than 1 subject
29 dataset = datasets.fetch_development_fmri(n_subjects=1)
30 func_filename = dataset.func[0]
31
32 # print basic information on the dataset
33 print('First subject functional nifti image (4D) is at: %s' %
34 dataset.func[0]) # 4D data
35
36
37 #####################################################################
38 # Preprocess
39 from nilearn.input_data import NiftiMasker
40
41 # This is fmri timeseries data: the background has not been removed yet,
42 # thus we need to use mask_strategy='epi' to compute the mask from the
43 # EPI images
44 masker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1,
45 mask_strategy='epi', standardize=True)
46 data_masked = masker.fit_transform(func_filename)
47
48 # Concatenate all the subjects
49 # fmri_data = np.concatenate(data_masked, axis=1)
50 fmri_data = data_masked
51
52
53 #####################################################################
54 # Apply ICA
55
56 from sklearn.decomposition import FastICA
57 n_components = 10
58 ica = FastICA(n_components=n_components, random_state=42)
59 components_masked = ica.fit_transform(data_masked.T).T
60
61 # Normalize estimated components, for thresholding to make sense
62 components_masked -= components_masked.mean(axis=0)
63 components_masked /= components_masked.std(axis=0)
64 # Threshold
65 import numpy as np
66 components_masked[np.abs(components_masked) < .8] = 0
67
68 # Now invert the masking operation, going back to a full 3D
69 # representation
70 component_img = masker.inverse_transform(components_masked)
71
72 #####################################################################
73 # Visualize the results
74
75 # Show some interesting components
76 from nilearn import image
77 from nilearn.plotting import plot_stat_map, show
78
79 # Use the mean as a background
80 mean_img = image.mean_img(func_filename)
81
82 plot_stat_map(image.index_img(component_img, 0), mean_img)
83
84 plot_stat_map(image.index_img(component_img, 1), mean_img)
85
86 show()
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/05_advanced/plot_ica_resting_state.py b/examples/05_advanced/plot_ica_resting_state.py
--- a/examples/05_advanced/plot_ica_resting_state.py
+++ b/examples/05_advanced/plot_ica_resting_state.py
@@ -23,9 +23,7 @@
#####################################################################
# Load movie watching dataset
from nilearn import datasets
-# Here we use only single subject to get faster-running code. For better
-# results, simply increase this number
-# XXX: must get the code to run for more than 1 subject
+# Here we use only single subject to get faster-running code.
dataset = datasets.fetch_development_fmri(n_subjects=1)
func_filename = dataset.func[0]
@@ -45,10 +43,6 @@
mask_strategy='epi', standardize=True)
data_masked = masker.fit_transform(func_filename)
-# Concatenate all the subjects
-# fmri_data = np.concatenate(data_masked, axis=1)
-fmri_data = data_masked
-
#####################################################################
# Apply ICA
| {"golden_diff": "diff --git a/examples/05_advanced/plot_ica_resting_state.py b/examples/05_advanced/plot_ica_resting_state.py\n--- a/examples/05_advanced/plot_ica_resting_state.py\n+++ b/examples/05_advanced/plot_ica_resting_state.py\n@@ -23,9 +23,7 @@\n #####################################################################\n # Load movie watching dataset\n from nilearn import datasets\n-# Here we use only single subject to get faster-running code. For better\n-# results, simply increase this number\n-# XXX: must get the code to run for more than 1 subject\n+# Here we use only single subject to get faster-running code.\n dataset = datasets.fetch_development_fmri(n_subjects=1)\n func_filename = dataset.func[0]\n \n@@ -45,10 +43,6 @@\n mask_strategy='epi', standardize=True)\n data_masked = masker.fit_transform(func_filename)\n \n-# Concatenate all the subjects\n-# fmri_data = np.concatenate(data_masked, axis=1)\n-fmri_data = data_masked\n-\n \n #####################################################################\n # Apply ICA\n", "issue": "Strange comment in ICA example\nThis is the comment:\r\n# XXX: must get the code to run for more than 1 subject\r\nAnd this is the link to the example:\r\nhttps://nilearn.github.io/auto_examples/05_advanced/plot_ica_resting_state.html\r\n\r\nThe comment appears to relate to an internal todo rather than comment/instruction to users.\n", "before_files": [{"content": "\"\"\"\nMultivariate decompositions: Independent component analysis of fMRI\n===================================================================\n\n\nThis example is meant to demonstrate nilearn as a low-level tools used to\ncombine feature extraction with a multivariate decomposition algorithm\nfor movie-watching.\n\nThis example is a toy. To apply ICA to fmri timeseries data, it is advised\nto look at the example\n:ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_decomposition.py`.\n\nThe example here applies the scikit-learn ICA to movie watching timeseries data.\nNote that following the code in the example, any unsupervised\ndecomposition model, or other latent-factor models, can be applied to\nthe data, as the scikit-learn API enables to exchange them as almost\nblack box (though the relevant parameter for brain maps might no longer\nbe given by a call to fit_transform).\n\n\"\"\"\n\n#####################################################################\n# Load movie watching dataset\nfrom nilearn import datasets\n# Here we use only single subject to get faster-running code. 
For better\n# results, simply increase this number\n# XXX: must get the code to run for more than 1 subject\ndataset = datasets.fetch_development_fmri(n_subjects=1)\nfunc_filename = dataset.func[0]\n\n# print basic information on the dataset\nprint('First subject functional nifti image (4D) is at: %s' %\n dataset.func[0]) # 4D data\n\n\n#####################################################################\n# Preprocess\nfrom nilearn.input_data import NiftiMasker\n\n# This is fmri timeseries data: the background has not been removed yet,\n# thus we need to use mask_strategy='epi' to compute the mask from the\n# EPI images\nmasker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1,\n mask_strategy='epi', standardize=True)\ndata_masked = masker.fit_transform(func_filename)\n\n# Concatenate all the subjects\n# fmri_data = np.concatenate(data_masked, axis=1)\nfmri_data = data_masked\n\n\n#####################################################################\n# Apply ICA\n\nfrom sklearn.decomposition import FastICA\nn_components = 10\nica = FastICA(n_components=n_components, random_state=42)\ncomponents_masked = ica.fit_transform(data_masked.T).T\n\n# Normalize estimated components, for thresholding to make sense\ncomponents_masked -= components_masked.mean(axis=0)\ncomponents_masked /= components_masked.std(axis=0)\n# Threshold\nimport numpy as np\ncomponents_masked[np.abs(components_masked) < .8] = 0\n\n# Now invert the masking operation, going back to a full 3D\n# representation\ncomponent_img = masker.inverse_transform(components_masked)\n\n#####################################################################\n# Visualize the results\n\n# Show some interesting components\nfrom nilearn import image\nfrom nilearn.plotting import plot_stat_map, show\n\n# Use the mean as a background\nmean_img = image.mean_img(func_filename)\n\nplot_stat_map(image.index_img(component_img, 0), mean_img)\n\nplot_stat_map(image.index_img(component_img, 1), mean_img)\n\nshow()\n", "path": "examples/05_advanced/plot_ica_resting_state.py"}], "after_files": [{"content": "\"\"\"\nMultivariate decompositions: Independent component analysis of fMRI\n===================================================================\n\n\nThis example is meant to demonstrate nilearn as a low-level tools used to\ncombine feature extraction with a multivariate decomposition algorithm\nfor movie-watching.\n\nThis example is a toy. 
To apply ICA to fmri timeseries data, it is advised\nto look at the example\n:ref:`sphx_glr_auto_examples_03_connectivity_plot_compare_decomposition.py`.\n\nThe example here applies the scikit-learn ICA to movie watching timeseries data.\nNote that following the code in the example, any unsupervised\ndecomposition model, or other latent-factor models, can be applied to\nthe data, as the scikit-learn API enables to exchange them as almost\nblack box (though the relevant parameter for brain maps might no longer\nbe given by a call to fit_transform).\n\n\"\"\"\n\n#####################################################################\n# Load movie watching dataset\nfrom nilearn import datasets\n# Here we use only single subject to get faster-running code.\ndataset = datasets.fetch_development_fmri(n_subjects=1)\nfunc_filename = dataset.func[0]\n\n# print basic information on the dataset\nprint('First subject functional nifti image (4D) is at: %s' %\n dataset.func[0]) # 4D data\n\n\n#####################################################################\n# Preprocess\nfrom nilearn.input_data import NiftiMasker\n\n# This is fmri timeseries data: the background has not been removed yet,\n# thus we need to use mask_strategy='epi' to compute the mask from the\n# EPI images\nmasker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1,\n mask_strategy='epi', standardize=True)\ndata_masked = masker.fit_transform(func_filename)\n\n\n#####################################################################\n# Apply ICA\n\nfrom sklearn.decomposition import FastICA\nn_components = 10\nica = FastICA(n_components=n_components, random_state=42)\ncomponents_masked = ica.fit_transform(data_masked.T).T\n\n# Normalize estimated components, for thresholding to make sense\ncomponents_masked -= components_masked.mean(axis=0)\ncomponents_masked /= components_masked.std(axis=0)\n# Threshold\nimport numpy as np\ncomponents_masked[np.abs(components_masked) < .8] = 0\n\n# Now invert the masking operation, going back to a full 3D\n# representation\ncomponent_img = masker.inverse_transform(components_masked)\n\n#####################################################################\n# Visualize the results\n\n# Show some interesting components\nfrom nilearn import image\nfrom nilearn.plotting import plot_stat_map, show\n\n# Use the mean as a background\nmean_img = image.mean_img(func_filename)\n\nplot_stat_map(image.index_img(component_img, 0), mean_img)\n\nplot_stat_map(image.index_img(component_img, 1), mean_img)\n\nshow()\n", "path": "examples/05_advanced/plot_ica_resting_state.py"}]} | 1,197 | 248 |
gh_patches_debug_26036 | rasdani/github-patches | git_diff | python-discord__bot-1293 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filter functionality for emoji-spam messages
Now that someone's figured out you can cause havoc by dumping massive amounts of emojis into a channel, we need a filter to automatically manage this ASAP. Detection should be fairly simple: a very high emoji count is required for the effect (>20/message to >250 for varying impact), but emoji variance is not required. 
Tangentially: This is also likely related to a discord update that went out that has slowed the emoji loading process recently.
I think we can safely filter out and autodelete high emoji messages without significant false positives.
We may want to handle both unicode emojis and server emojis.
It's possible for unicode emojis to get a higher "load" because they count as one character for message transmission purposes, but server emojis count for more. However, server emojis may be animated, and will apply significant load by that fact as well. 
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/rules/discord_emojis.py`
Content:
```
1 import re
2 from typing import Dict, Iterable, List, Optional, Tuple
3
4 from discord import Member, Message
5
6
7 DISCORD_EMOJI_RE = re.compile(r"<:\w+:\d+>")
8 CODE_BLOCK_RE = re.compile(r"```.*?```", flags=re.DOTALL)
9
10
11 async def apply(
12 last_message: Message, recent_messages: List[Message], config: Dict[str, int]
13 ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
14 """Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user."""
15 relevant_messages = tuple(
16 msg
17 for msg in recent_messages
18 if msg.author == last_message.author
19 )
20
21 # Get rid of code blocks in the message before searching for emojis.
22 total_emojis = sum(
23 len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub("", msg.content)))
24 for msg in relevant_messages
25 )
26
27 if total_emojis > config['max']:
28 return (
29 f"sent {total_emojis} emojis in {config['interval']}s",
30 (last_message.author,),
31 relevant_messages
32 )
33 return None
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/rules/discord_emojis.py b/bot/rules/discord_emojis.py
--- a/bot/rules/discord_emojis.py
+++ b/bot/rules/discord_emojis.py
@@ -2,16 +2,17 @@
from typing import Dict, Iterable, List, Optional, Tuple
from discord import Member, Message
+from emoji import demojize
-DISCORD_EMOJI_RE = re.compile(r"<:\w+:\d+>")
+DISCORD_EMOJI_RE = re.compile(r"<:\w+:\d+>|:\w+:")
CODE_BLOCK_RE = re.compile(r"```.*?```", flags=re.DOTALL)
async def apply(
last_message: Message, recent_messages: List[Message], config: Dict[str, int]
) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
- """Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user."""
+ """Detects total Discord emojis exceeding the limit sent by a single user."""
relevant_messages = tuple(
msg
for msg in recent_messages
@@ -19,8 +20,9 @@
)
# Get rid of code blocks in the message before searching for emojis.
+ # Convert Unicode emojis to :emoji: format to get their count.
total_emojis = sum(
- len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub("", msg.content)))
+ len(DISCORD_EMOJI_RE.findall(demojize(CODE_BLOCK_RE.sub("", msg.content))))
for msg in relevant_messages
)
| {"golden_diff": "diff --git a/bot/rules/discord_emojis.py b/bot/rules/discord_emojis.py\n--- a/bot/rules/discord_emojis.py\n+++ b/bot/rules/discord_emojis.py\n@@ -2,16 +2,17 @@\n from typing import Dict, Iterable, List, Optional, Tuple\n \n from discord import Member, Message\n+from emoji import demojize\n \n \n-DISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>\")\n+DISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>|:\\w+:\")\n CODE_BLOCK_RE = re.compile(r\"```.*?```\", flags=re.DOTALL)\n \n \n async def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n- \"\"\"Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user.\"\"\"\n+ \"\"\"Detects total Discord emojis exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n@@ -19,8 +20,9 @@\n )\n \n # Get rid of code blocks in the message before searching for emojis.\n+ # Convert Unicode emojis to :emoji: format to get their count.\n total_emojis = sum(\n- len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub(\"\", msg.content)))\n+ len(DISCORD_EMOJI_RE.findall(demojize(CODE_BLOCK_RE.sub(\"\", msg.content))))\n for msg in relevant_messages\n )\n", "issue": "Filter functionality for emoji-spam messages\nNow that someone's figured out you can cause havok by dumping massive amounts of emojis into a channel, we need a filter to automatically manage this ASAP. Detection should be fairly simple, a very high emoji count is required for the effect (>20/message to >250 for varying impact) but emoji variance is not required. \r\n\r\nTangentially: This is also likely related to a discord update that went out that has slowed the emoji loading process recently.\r\n\r\nI think we can safely filter out and autodelete high emoji messages without significant false positives. \r\n\r\nWe may want to handle both unicode emojis and server emojis. \r\n\r\nIt's possible for unicode emojis to get a higher \"load\" because they could as one character for message transmission purposes but server emojis count for more. 
However, server emojis may be animated, and will apply significant load by that fact as well.\r\n\r\n\n", "before_files": [{"content": "import re\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nDISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>\")\nCODE_BLOCK_RE = re.compile(r\"```.*?```\", flags=re.DOTALL)\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total Discord emojis (excluding Unicode emojis) exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if msg.author == last_message.author\n )\n\n # Get rid of code blocks in the message before searching for emojis.\n total_emojis = sum(\n len(DISCORD_EMOJI_RE.findall(CODE_BLOCK_RE.sub(\"\", msg.content)))\n for msg in relevant_messages\n )\n\n if total_emojis > config['max']:\n return (\n f\"sent {total_emojis} emojis in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/discord_emojis.py"}], "after_files": [{"content": "import re\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\nfrom emoji import demojize\n\n\nDISCORD_EMOJI_RE = re.compile(r\"<:\\w+:\\d+>|:\\w+:\")\nCODE_BLOCK_RE = re.compile(r\"```.*?```\", flags=re.DOTALL)\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total Discord emojis exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if msg.author == last_message.author\n )\n\n # Get rid of code blocks in the message before searching for emojis.\n # Convert Unicode emojis to :emoji: format to get their count.\n total_emojis = sum(\n len(DISCORD_EMOJI_RE.findall(demojize(CODE_BLOCK_RE.sub(\"\", msg.content))))\n for msg in relevant_messages\n )\n\n if total_emojis > config['max']:\n return (\n f\"sent {total_emojis} emojis in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/discord_emojis.py"}]} | 760 | 351 |
gh_patches_debug_51560 | rasdani/github-patches | git_diff | ray-project__ray-10593 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make the multi-worker feature for Java worker experimental
Right now, the multi-worker feature for Java worker is enabled by default, but the `ActorHandle::kill()` API doesn't work well if multi-worker is enabled because it will kill the whole process instead of one worker in the process.
To avoid complaints from Java users, we should disable the multi-worker feature by default, but still enable it in unit tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/job_config.py`
Content:
```
1 import ray
2
3
4 class JobConfig:
5 """A class used to store the configurations of a job.
6
7 Attributes:
8 worker_env (dict): Environment variables to be set on worker
9 processes.
10 num_java_workers_per_process (int): The number of java workers per
11 worker process.
12 jvm_options (str[]): The jvm options for java workers of the job.
13 """
14
15 def __init__(
16 self,
17 worker_env=None,
18 num_java_workers_per_process=10,
19 jvm_options=None,
20 ):
21 if worker_env is None:
22 self.worker_env = dict()
23 else:
24 self.worker_env = worker_env
25 self.num_java_workers_per_process = num_java_workers_per_process
26 if jvm_options is None:
27 self.jvm_options = []
28 else:
29 self.jvm_options = jvm_options
30
31 def serialize(self):
32 job_config = ray.gcs_utils.JobConfig()
33 for key in self.worker_env:
34 job_config.worker_env[key] = self.worker_env[key]
35 job_config.num_java_workers_per_process = (
36 self.num_java_workers_per_process)
37 job_config.jvm_options.extend(self.jvm_options)
38 return job_config.SerializeToString()
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/job_config.py b/python/ray/job_config.py
--- a/python/ray/job_config.py
+++ b/python/ray/job_config.py
@@ -15,7 +15,7 @@
def __init__(
self,
worker_env=None,
- num_java_workers_per_process=10,
+ num_java_workers_per_process=1,
jvm_options=None,
):
if worker_env is None:
| {"golden_diff": "diff --git a/python/ray/job_config.py b/python/ray/job_config.py\n--- a/python/ray/job_config.py\n+++ b/python/ray/job_config.py\n@@ -15,7 +15,7 @@\n def __init__(\n self,\n worker_env=None,\n- num_java_workers_per_process=10,\n+ num_java_workers_per_process=1,\n jvm_options=None,\n ):\n if worker_env is None:\n", "issue": "Make the multi-worker feature for Java worker experimental\nRight now, the multi-worker feature for Java worker is enabled by default, but the `ActorHandle::kill()` API doesn't work well if multi-worker is enabled because it will kill the whole process instead of one worker in the process.\r\n\r\nTo avoid complaints from Java users, we should disable the multi-worker feature by default, but we still enable it in unit test.\n", "before_files": [{"content": "import ray\n\n\nclass JobConfig:\n \"\"\"A class used to store the configurations of a job.\n\n Attributes:\n worker_env (dict): Environment variables to be set on worker\n processes.\n num_java_workers_per_process (int): The number of java workers per\n worker process.\n jvm_options (str[]): The jvm options for java workers of the job.\n \"\"\"\n\n def __init__(\n self,\n worker_env=None,\n num_java_workers_per_process=10,\n jvm_options=None,\n ):\n if worker_env is None:\n self.worker_env = dict()\n else:\n self.worker_env = worker_env\n self.num_java_workers_per_process = num_java_workers_per_process\n if jvm_options is None:\n self.jvm_options = []\n else:\n self.jvm_options = jvm_options\n\n def serialize(self):\n job_config = ray.gcs_utils.JobConfig()\n for key in self.worker_env:\n job_config.worker_env[key] = self.worker_env[key]\n job_config.num_java_workers_per_process = (\n self.num_java_workers_per_process)\n job_config.jvm_options.extend(self.jvm_options)\n return job_config.SerializeToString()\n", "path": "python/ray/job_config.py"}], "after_files": [{"content": "import ray\n\n\nclass JobConfig:\n \"\"\"A class used to store the configurations of a job.\n\n Attributes:\n worker_env (dict): Environment variables to be set on worker\n processes.\n num_java_workers_per_process (int): The number of java workers per\n worker process.\n jvm_options (str[]): The jvm options for java workers of the job.\n \"\"\"\n\n def __init__(\n self,\n worker_env=None,\n num_java_workers_per_process=1,\n jvm_options=None,\n ):\n if worker_env is None:\n self.worker_env = dict()\n else:\n self.worker_env = worker_env\n self.num_java_workers_per_process = num_java_workers_per_process\n if jvm_options is None:\n self.jvm_options = []\n else:\n self.jvm_options = jvm_options\n\n def serialize(self):\n job_config = ray.gcs_utils.JobConfig()\n for key in self.worker_env:\n job_config.worker_env[key] = self.worker_env[key]\n job_config.num_java_workers_per_process = (\n self.num_java_workers_per_process)\n job_config.jvm_options.extend(self.jvm_options)\n return job_config.SerializeToString()\n", "path": "python/ray/job_config.py"}]} | 670 | 97 |
gh_patches_debug_28555 | rasdani/github-patches | git_diff | archlinux__archinstall-418 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AMD graphics driver selection unclear
For AMD, the options are by no means obvious in what they actually do.
The recommended choice should in my opinion be mesa (packages mesa and radeon-vulkan).
If you want to go by vendor in the first table, then mesa should be moved into the secondary table, i.e. when selecting AMD you get the choice between AMDVLK and mesa. The current situation is just confusing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `archinstall/lib/hardware.py`
Content:
```
1 import os, subprocess, json
2 from .general import sys_command
3 from .networking import list_interfaces, enrichIfaceTypes
4 from typing import Optional
5
6 __packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']
7
8 AVAILABLE_GFX_DRIVERS = {
9 # Sub-dicts are layer-2 options to be selected
10 # and lists are a list of packages to be installed
11 'AMD / ATI' : {
12 'amd' : ['xf86-video-amdgpu'],
13 'ati' : ['xf86-video-ati']
14 },
15 'intel' : ['xf86-video-intel'],
16 'nvidia' : {
17 'open-source' : ['xf86-video-nouveau'],
18 'proprietary' : ['nvidia']
19 },
20 'mesa' : ['mesa'],
21 'fbdev' : ['xf86-video-fbdev'],
22 'vesa' : ['xf86-video-vesa'],
23 'vmware / virtualbox' : ['xf86-video-vmware']
24 }
25
26 def hasWifi()->bool:
27 return 'WIRELESS' in enrichIfaceTypes(list_interfaces().values()).values()
28
29 def hasAMDCPU()->bool:
30 if subprocess.check_output("lscpu | grep AMD", shell=True).strip().decode():
31 return True
32 return False
33 def hasIntelCPU()->bool:
34 if subprocess.check_output("lscpu | grep Intel", shell=True).strip().decode():
35 return True
36 return False
37
38 def hasUEFI()->bool:
39 return os.path.isdir('/sys/firmware/efi')
40
41 def graphicsDevices()->dict:
42 cards = {}
43 for line in sys_command(f"lspci"):
44 if b' VGA ' in line:
45 _, identifier = line.split(b': ',1)
46 cards[identifier.strip().lower().decode('UTF-8')] = line
47 return cards
48
49 def hasNvidiaGraphics()->bool:
50 return any('nvidia' in x for x in graphicsDevices())
51
52 def hasAmdGraphics()->bool:
53 return any('amd' in x for x in graphicsDevices())
54
55 def hasIntelGraphics()->bool:
56 return any('intel' in x for x in graphicsDevices())
57
58
59 def cpuVendor()-> Optional[str]:
60 cpu_info = json.loads(subprocess.check_output("lscpu -J", shell=True).decode('utf-8'))['lscpu']
61 for info in cpu_info:
62 if info.get('field',None):
63 if info.get('field',None) == "Vendor ID:":
64 return info.get('data',None)
65
66 def isVM() -> bool:
67 try:
68 subprocess.check_call(["systemd-detect-virt"]) # systemd-detect-virt issues a non-zero exit code if it is not on a virtual machine
69 return True
70 except:
71 return False
72
73 # TODO: Add more identifiers
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/archinstall/lib/hardware.py b/archinstall/lib/hardware.py
--- a/archinstall/lib/hardware.py
+++ b/archinstall/lib/hardware.py
@@ -3,24 +3,53 @@
from .networking import list_interfaces, enrichIfaceTypes
from typing import Optional
-__packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']
+__packages__ = [
+ "mesa",
+ "xf86-video-amdgpu",
+ "xf86-video-ati",
+ "xf86-video-nouveau",
+ "xf86-video-vmware",
+ "libva-mesa-driver",
+ "libva-intel-driver",
+ "intel-media-driver",
+ "vulkan-radeon",
+ "vulkan-intel",
+ "nvidia",
+]
AVAILABLE_GFX_DRIVERS = {
# Sub-dicts are layer-2 options to be selected
# and lists are a list of packages to be installed
- 'AMD / ATI' : {
- 'amd' : ['xf86-video-amdgpu'],
- 'ati' : ['xf86-video-ati']
+ "All open-source (default)": [
+ "mesa",
+ "xf86-video-amdgpu",
+ "xf86-video-ati",
+ "xf86-video-nouveau",
+ "xf86-video-vmware",
+ "libva-mesa-driver",
+ "libva-intel-driver",
+ "intel-media-driver",
+ "vulkan-radeon",
+ "vulkan-intel",
+ ],
+ "AMD / ATI (open-source)": [
+ "mesa",
+ "xf86-video-amdgpu",
+ "xf86-video-ati",
+ "libva-mesa-driver",
+ "vulkan-radeon",
+ ],
+ "Intel (open-source)": [
+ "mesa",
+ "libva-intel-driver",
+ "intel-media-driver",
+ "vulkan-intel",
+ ],
+ "Nvidia": {
+ "open-source": ["mesa", "xf86-video-nouveau", "libva-mesa-driver"],
+ "proprietary": ["nvidia"],
},
- 'intel' : ['xf86-video-intel'],
- 'nvidia' : {
- 'open-source' : ['xf86-video-nouveau'],
- 'proprietary' : ['nvidia']
- },
- 'mesa' : ['mesa'],
- 'fbdev' : ['xf86-video-fbdev'],
- 'vesa' : ['xf86-video-vesa'],
- 'vmware / virtualbox' : ['xf86-video-vmware']
+ "VMware / VirtualBox (open-source)": ["mesa", "xf86-video-vmware"],
}
def hasWifi()->bool:
| {"golden_diff": "diff --git a/archinstall/lib/hardware.py b/archinstall/lib/hardware.py\n--- a/archinstall/lib/hardware.py\n+++ b/archinstall/lib/hardware.py\n@@ -3,24 +3,53 @@\n from .networking import list_interfaces, enrichIfaceTypes\n from typing import Optional\n \n-__packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']\n+__packages__ = [\n+\t\t\"mesa\",\n+\t\t\"xf86-video-amdgpu\",\n+\t\t\"xf86-video-ati\",\n+\t\t\"xf86-video-nouveau\",\n+\t\t\"xf86-video-vmware\",\n+\t\t\"libva-mesa-driver\",\n+\t\t\"libva-intel-driver\",\n+\t\t\"intel-media-driver\",\n+\t\t\"vulkan-radeon\",\n+\t\t\"vulkan-intel\",\n+\t\t\"nvidia\",\n+]\n \n AVAILABLE_GFX_DRIVERS = {\n \t# Sub-dicts are layer-2 options to be selected\n \t# and lists are a list of packages to be installed\n-\t'AMD / ATI' : {\n-\t\t'amd' : ['xf86-video-amdgpu'],\n-\t\t'ati' : ['xf86-video-ati']\n+\t\"All open-source (default)\": [\n+\t\t\"mesa\",\n+\t\t\"xf86-video-amdgpu\",\n+\t\t\"xf86-video-ati\",\n+\t\t\"xf86-video-nouveau\",\n+\t\t\"xf86-video-vmware\",\n+\t\t\"libva-mesa-driver\",\n+\t\t\"libva-intel-driver\",\n+\t\t\"intel-media-driver\",\n+\t\t\"vulkan-radeon\",\n+\t\t\"vulkan-intel\",\n+\t],\n+\t\"AMD / ATI (open-source)\": [\n+\t\t\"mesa\",\n+\t\t\"xf86-video-amdgpu\",\n+\t\t\"xf86-video-ati\",\n+\t\t\"libva-mesa-driver\",\n+\t\t\"vulkan-radeon\",\n+\t],\n+\t\"Intel (open-source)\": [\n+\t\t\"mesa\",\n+\t\t\"libva-intel-driver\",\n+\t\t\"intel-media-driver\",\n+\t\t\"vulkan-intel\",\n+\t],\n+\t\"Nvidia\": {\n+\t\t\"open-source\": [\"mesa\", \"xf86-video-nouveau\", \"libva-mesa-driver\"],\n+\t\t\"proprietary\": [\"nvidia\"],\n \t},\n-\t'intel' : ['xf86-video-intel'],\n-\t'nvidia' : {\n-\t\t'open-source' : ['xf86-video-nouveau'],\n-\t\t'proprietary' : ['nvidia']\n-\t},\n-\t'mesa' : ['mesa'],\n-\t'fbdev' : ['xf86-video-fbdev'],\n-\t'vesa' : ['xf86-video-vesa'],\n-\t'vmware / virtualbox' : ['xf86-video-vmware']\n+\t\"VMware / VirtualBox (open-source)\": [\"mesa\", \"xf86-video-vmware\"],\n }\n \n def hasWifi()->bool:\n", "issue": "AMD graphics driver selection unclear\nFor AMD, the options are by no means obvious in what they actually do.\r\n\r\nThe recommended choice should in my opinion be mesa (packages mesa and radeon-vulkan).\r\nIf you want to go by vendor in the first table, then mesa should be moved into the secondary table, i.e. when selecting AMD you get the choice between AMDVLK and mesa. 
The current situation is just confusing.\n", "before_files": [{"content": "import os, subprocess, json\nfrom .general import sys_command\nfrom .networking import list_interfaces, enrichIfaceTypes\nfrom typing import Optional\n\n__packages__ = ['xf86-video-amdgpu', 'xf86-video-ati', 'xf86-video-intel', 'xf86-video-nouveau', 'xf86-video-fbdev', 'xf86-video-vesa', 'xf86-video-vmware', 'nvidia', 'mesa']\n\nAVAILABLE_GFX_DRIVERS = {\n\t# Sub-dicts are layer-2 options to be selected\n\t# and lists are a list of packages to be installed\n\t'AMD / ATI' : {\n\t\t'amd' : ['xf86-video-amdgpu'],\n\t\t'ati' : ['xf86-video-ati']\n\t},\n\t'intel' : ['xf86-video-intel'],\n\t'nvidia' : {\n\t\t'open-source' : ['xf86-video-nouveau'],\n\t\t'proprietary' : ['nvidia']\n\t},\n\t'mesa' : ['mesa'],\n\t'fbdev' : ['xf86-video-fbdev'],\n\t'vesa' : ['xf86-video-vesa'],\n\t'vmware / virtualbox' : ['xf86-video-vmware']\n}\n\ndef hasWifi()->bool:\n\treturn 'WIRELESS' in enrichIfaceTypes(list_interfaces().values()).values()\n\ndef hasAMDCPU()->bool:\n\tif subprocess.check_output(\"lscpu | grep AMD\", shell=True).strip().decode():\n\t\treturn True\n\treturn False\ndef hasIntelCPU()->bool:\n\tif subprocess.check_output(\"lscpu | grep Intel\", shell=True).strip().decode():\n\t\treturn True\n\treturn False\n\ndef hasUEFI()->bool:\n\treturn os.path.isdir('/sys/firmware/efi')\n\ndef graphicsDevices()->dict:\n\tcards = {}\n\tfor line in sys_command(f\"lspci\"):\n\t\tif b' VGA ' in line:\n\t\t\t_, identifier = line.split(b': ',1)\n\t\t\tcards[identifier.strip().lower().decode('UTF-8')] = line\n\treturn cards\n\ndef hasNvidiaGraphics()->bool:\n\treturn any('nvidia' in x for x in graphicsDevices())\n\ndef hasAmdGraphics()->bool:\n\treturn any('amd' in x for x in graphicsDevices())\n\ndef hasIntelGraphics()->bool:\n\treturn any('intel' in x for x in graphicsDevices())\n\n\ndef cpuVendor()-> Optional[str]:\n\tcpu_info = json.loads(subprocess.check_output(\"lscpu -J\", shell=True).decode('utf-8'))['lscpu']\n\tfor info in cpu_info:\n\t\tif info.get('field',None):\n\t\t\tif info.get('field',None) == \"Vendor ID:\":\n\t\t\t\treturn info.get('data',None)\n\ndef isVM() -> bool:\n\ttry:\n\t\tsubprocess.check_call([\"systemd-detect-virt\"]) # systemd-detect-virt issues a non-zero exit code if it is not on a virtual machine\n\t\treturn True\n\texcept:\n\t\treturn False\n\n# TODO: Add more identifiers\n", "path": "archinstall/lib/hardware.py"}], "after_files": [{"content": "import os, subprocess, json\nfrom .general import sys_command\nfrom .networking import list_interfaces, enrichIfaceTypes\nfrom typing import Optional\n\n__packages__ = [\n\t\t\"mesa\",\n\t\t\"xf86-video-amdgpu\",\n\t\t\"xf86-video-ati\",\n\t\t\"xf86-video-nouveau\",\n\t\t\"xf86-video-vmware\",\n\t\t\"libva-mesa-driver\",\n\t\t\"libva-intel-driver\",\n\t\t\"intel-media-driver\",\n\t\t\"vulkan-radeon\",\n\t\t\"vulkan-intel\",\n\t\t\"nvidia\",\n]\n\nAVAILABLE_GFX_DRIVERS = {\n\t# Sub-dicts are layer-2 options to be selected\n\t# and lists are a list of packages to be installed\n\t\"All open-source (default)\": [\n\t\t\"mesa\",\n\t\t\"xf86-video-amdgpu\",\n\t\t\"xf86-video-ati\",\n\t\t\"xf86-video-nouveau\",\n\t\t\"xf86-video-vmware\",\n\t\t\"libva-mesa-driver\",\n\t\t\"libva-intel-driver\",\n\t\t\"intel-media-driver\",\n\t\t\"vulkan-radeon\",\n\t\t\"vulkan-intel\",\n\t],\n\t\"AMD / ATI (open-source)\": [\n\t\t\"mesa\",\n\t\t\"xf86-video-amdgpu\",\n\t\t\"xf86-video-ati\",\n\t\t\"libva-mesa-driver\",\n\t\t\"vulkan-radeon\",\n\t],\n\t\"Intel (open-source)\": 
[\n\t\t\"mesa\",\n\t\t\"libva-intel-driver\",\n\t\t\"intel-media-driver\",\n\t\t\"vulkan-intel\",\n\t],\n\t\"Nvidia\": {\n\t\t\"open-source\": [\"mesa\", \"xf86-video-nouveau\", \"libva-mesa-driver\"],\n\t\t\"proprietary\": [\"nvidia\"],\n\t},\n\t\"VMware / VirtualBox (open-source)\": [\"mesa\", \"xf86-video-vmware\"],\n}\n\ndef hasWifi()->bool:\n\treturn 'WIRELESS' in enrichIfaceTypes(list_interfaces().values()).values()\n\ndef hasAMDCPU()->bool:\n\tif subprocess.check_output(\"lscpu | grep AMD\", shell=True).strip().decode():\n\t\treturn True\n\treturn False\ndef hasIntelCPU()->bool:\n\tif subprocess.check_output(\"lscpu | grep Intel\", shell=True).strip().decode():\n\t\treturn True\n\treturn False\n\ndef hasUEFI()->bool:\n\treturn os.path.isdir('/sys/firmware/efi')\n\ndef graphicsDevices()->dict:\n\tcards = {}\n\tfor line in sys_command(f\"lspci\"):\n\t\tif b' VGA ' in line:\n\t\t\t_, identifier = line.split(b': ',1)\n\t\t\tcards[identifier.strip().lower().decode('UTF-8')] = line\n\treturn cards\n\ndef hasNvidiaGraphics()->bool:\n\treturn any('nvidia' in x for x in graphicsDevices())\n\ndef hasAmdGraphics()->bool:\n\treturn any('amd' in x for x in graphicsDevices())\n\ndef hasIntelGraphics()->bool:\n\treturn any('intel' in x for x in graphicsDevices())\n\n\ndef cpuVendor()-> Optional[str]:\n\tcpu_info = json.loads(subprocess.check_output(\"lscpu -J\", shell=True).decode('utf-8'))['lscpu']\n\tfor info in cpu_info:\n\t\tif info.get('field',None):\n\t\t\tif info.get('field',None) == \"Vendor ID:\":\n\t\t\t\treturn info.get('data',None)\n\ndef isVM() -> bool:\n\ttry:\n\t\tsubprocess.check_call([\"systemd-detect-virt\"]) # systemd-detect-virt issues a non-zero exit code if it is not on a virtual machine\n\t\treturn True\n\texcept:\n\t\treturn False\n\n# TODO: Add more identifiers\n", "path": "archinstall/lib/hardware.py"}]} | 1,178 | 744 |
gh_patches_debug_47929 | rasdani/github-patches | git_diff | liqd__a4-opin-1835 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sort by section changes automatically to "most recent" on productive
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/ideas/templatetags/idea_tags.py`
Content:
```
1 from django import template
2
3 from euth.ideas.models import Idea
4
5 register = template.Library()
6
7
8 @register.simple_tag
9 def get_range(number, listcount):
10 if number < 3:
11 return range(1, 6)
12 elif number > listcount - 2:
13 return range(listcount - 4, listcount + 1)
14 else:
15 return range(number - 2, number + 3)
16
17
18 @register.simple_tag
19 def is_idea_list(module):
20 return Idea.objects.filter(module=module).count() > 0
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/ideas/templatetags/idea_tags.py b/euth/ideas/templatetags/idea_tags.py
--- a/euth/ideas/templatetags/idea_tags.py
+++ b/euth/ideas/templatetags/idea_tags.py
@@ -18,3 +18,12 @@
@register.simple_tag
def is_idea_list(module):
return Idea.objects.filter(module=module).count() > 0
+
+
[email protected]_tag
+def combined_url_parameter(request_query_dict, **kwargs):
+ combined_query_dict = request_query_dict.copy()
+ for key in kwargs:
+ combined_query_dict.setlist(key, [kwargs[key]])
+ encoded_parameter = '?' + combined_query_dict.urlencode()
+ return encoded_parameter
| {"golden_diff": "diff --git a/euth/ideas/templatetags/idea_tags.py b/euth/ideas/templatetags/idea_tags.py\n--- a/euth/ideas/templatetags/idea_tags.py\n+++ b/euth/ideas/templatetags/idea_tags.py\n@@ -18,3 +18,12 @@\n @register.simple_tag\n def is_idea_list(module):\n return Idea.objects.filter(module=module).count() > 0\n+\n+\[email protected]_tag\n+def combined_url_parameter(request_query_dict, **kwargs):\n+ combined_query_dict = request_query_dict.copy()\n+ for key in kwargs:\n+ combined_query_dict.setlist(key, [kwargs[key]])\n+ encoded_parameter = '?' + combined_query_dict.urlencode()\n+ return encoded_parameter\n", "issue": "Sort by section changes automatically to \"most recent\" on productive\n\n", "before_files": [{"content": "from django import template\n\nfrom euth.ideas.models import Idea\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_range(number, listcount):\n if number < 3:\n return range(1, 6)\n elif number > listcount - 2:\n return range(listcount - 4, listcount + 1)\n else:\n return range(number - 2, number + 3)\n\n\[email protected]_tag\ndef is_idea_list(module):\n return Idea.objects.filter(module=module).count() > 0\n", "path": "euth/ideas/templatetags/idea_tags.py"}], "after_files": [{"content": "from django import template\n\nfrom euth.ideas.models import Idea\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_range(number, listcount):\n if number < 3:\n return range(1, 6)\n elif number > listcount - 2:\n return range(listcount - 4, listcount + 1)\n else:\n return range(number - 2, number + 3)\n\n\[email protected]_tag\ndef is_idea_list(module):\n return Idea.objects.filter(module=module).count() > 0\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n", "path": "euth/ideas/templatetags/idea_tags.py"}]} | 434 | 174 |
gh_patches_debug_2515 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2974 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test 2959: redesign mail of new Stellungnahme in b-plan module
**URL:** mail
**user:** sachbearbeiter
**expected behaviour:** logo is no longer in the email
**behaviour:** logo is on the bottom left corner of the mail, outside the mail layout box
**important screensize:**
**device & browser:** mail on mac
**Comment/Question:**
Screenshot?
<img width="776" alt="Bildschirmfoto 2020-05-25 um 15 44 09" src="https://user-images.githubusercontent.com/35491681/82819838-5e76f900-9ea1-11ea-99a9-9a531588387f.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/bplan/emails.py`
Content:
```
1 from django.conf import settings
2
3 from meinberlin.apps.contrib.emails import Email
4
5
6 class OfficeWorkerNotification(Email):
7 template_name = 'meinberlin_bplan/emails/office_worker_notification'
8
9 @property
10 def office_worker_email(self):
11 project = self.object.module.project
12 return project.externalproject.bplan.office_worker_email
13
14 @property
15 def bplan_identifier(self):
16 project = self.object.module.project
17 return project.externalproject.bplan.identifier
18
19 def get_receivers(self):
20 return [self.office_worker_email]
21
22 def get_context(self):
23 context = super().get_context()
24 context['module'] = self.object.module
25 context['project'] = self.object.module.project
26 context['contact_email'] = settings.CONTACT_EMAIL
27 context['identifier'] = self.bplan_identifier
28 return context
29
30
31 class SubmitterConfirmation(Email):
32 template_name = 'meinberlin_bplan/emails/submitter_confirmation'
33
34 def get_receivers(self):
35 return [self.object.email]
36
37 def get_context(self):
38 context = super().get_context()
39 context['module'] = self.object.module
40 context['project'] = self.object.module.project
41 context['contact_email'] = settings.CONTACT_EMAIL
42 return context
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/bplan/emails.py b/meinberlin/apps/bplan/emails.py
--- a/meinberlin/apps/bplan/emails.py
+++ b/meinberlin/apps/bplan/emails.py
@@ -27,6 +27,9 @@
context['identifier'] = self.bplan_identifier
return context
+ def get_attachments(self):
+ return []
+
class SubmitterConfirmation(Email):
template_name = 'meinberlin_bplan/emails/submitter_confirmation'
| {"golden_diff": "diff --git a/meinberlin/apps/bplan/emails.py b/meinberlin/apps/bplan/emails.py\n--- a/meinberlin/apps/bplan/emails.py\n+++ b/meinberlin/apps/bplan/emails.py\n@@ -27,6 +27,9 @@\n context['identifier'] = self.bplan_identifier\n return context\n \n+ def get_attachments(self):\n+ return []\n+\n \n class SubmitterConfirmation(Email):\n template_name = 'meinberlin_bplan/emails/submitter_confirmation'\n", "issue": "test 2959: redesign mail of new Stellungnahme in b-plan module\n**URL:** mail\r\n**user:** sachbearbeiter\r\n**expected behaviour:** logo is no longer in the email\r\n**behaviour:** logo is on the bottom left corner of the mail, outside the mail layout box \r\n**important screensize:**\r\n**device & browser:** mail on mac\r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n<img width=\"776\" alt=\"Bildschirmfoto 2020-05-25 um 15 44 09\" src=\"https://user-images.githubusercontent.com/35491681/82819838-5e76f900-9ea1-11ea-99a9-9a531588387f.png\">\r\n\r\n\n", "before_files": [{"content": "from django.conf import settings\n\nfrom meinberlin.apps.contrib.emails import Email\n\n\nclass OfficeWorkerNotification(Email):\n template_name = 'meinberlin_bplan/emails/office_worker_notification'\n\n @property\n def office_worker_email(self):\n project = self.object.module.project\n return project.externalproject.bplan.office_worker_email\n\n @property\n def bplan_identifier(self):\n project = self.object.module.project\n return project.externalproject.bplan.identifier\n\n def get_receivers(self):\n return [self.office_worker_email]\n\n def get_context(self):\n context = super().get_context()\n context['module'] = self.object.module\n context['project'] = self.object.module.project\n context['contact_email'] = settings.CONTACT_EMAIL\n context['identifier'] = self.bplan_identifier\n return context\n\n\nclass SubmitterConfirmation(Email):\n template_name = 'meinberlin_bplan/emails/submitter_confirmation'\n\n def get_receivers(self):\n return [self.object.email]\n\n def get_context(self):\n context = super().get_context()\n context['module'] = self.object.module\n context['project'] = self.object.module.project\n context['contact_email'] = settings.CONTACT_EMAIL\n return context\n", "path": "meinberlin/apps/bplan/emails.py"}], "after_files": [{"content": "from django.conf import settings\n\nfrom meinberlin.apps.contrib.emails import Email\n\n\nclass OfficeWorkerNotification(Email):\n template_name = 'meinberlin_bplan/emails/office_worker_notification'\n\n @property\n def office_worker_email(self):\n project = self.object.module.project\n return project.externalproject.bplan.office_worker_email\n\n @property\n def bplan_identifier(self):\n project = self.object.module.project\n return project.externalproject.bplan.identifier\n\n def get_receivers(self):\n return [self.office_worker_email]\n\n def get_context(self):\n context = super().get_context()\n context['module'] = self.object.module\n context['project'] = self.object.module.project\n context['contact_email'] = settings.CONTACT_EMAIL\n context['identifier'] = self.bplan_identifier\n return context\n\n def get_attachments(self):\n return []\n\n\nclass SubmitterConfirmation(Email):\n template_name = 'meinberlin_bplan/emails/submitter_confirmation'\n\n def get_receivers(self):\n return [self.object.email]\n\n def get_context(self):\n context = super().get_context()\n context['module'] = self.object.module\n context['project'] = self.object.module.project\n context['contact_email'] = settings.CONTACT_EMAIL\n return context\n", "path": 
"meinberlin/apps/bplan/emails.py"}]} | 809 | 117 |
gh_patches_debug_11460 | rasdani/github-patches | git_diff | modoboa__modoboa-2495 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Robots.txt is missing from urls.py
# Impacted versions
* Modoboa: 1.12.2 and older
* installer used: Yes, but some modifications made
* Webserver: Nginx
# Steps to reproduce
Install modoboa and enable webinterface.
# Current behavior
No robots.txt is defined. Search engines do not know how to index the website. When search engines try to find robots.txt, a 404 is raised and the error is mailed to ADMINS (if configured).
# Expected behavior
Robots.txt defined in urls.py to deny all traffic, as webmail should not be publicly indexed by search engines. A possible fix is to add:
`path('robots.txt', lambda r: HttpResponse("User-agent: *\nDisAllow: /", content_type="text/plain"), name='robots')`
# Video/Screenshot link (optional)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modoboa/core/urls.py`
Content:
```
1 """Core urls."""
2
3 from django.urls import path
4
5 from . import views
6
7 app_name = "core"
8
9 urlpatterns = [
10 path('', views.RootDispatchView.as_view(), name="root"),
11 path('dashboard/', views.DashboardView.as_view(), name="dashboard"),
12
13 path('accounts/login/', views.dologin, name="login"),
14 path('accounts/logout/', views.dologout, name="logout"),
15 path('accounts/2fa_verify/',
16 views.TwoFactorCodeVerifyView.as_view(),
17 name='2fa_verify'),
18
19 path('core/', views.viewsettings, name="index"),
20 path('core/parameters/', views.parameters, name="parameters"),
21 path('core/info/', views.information, name="information"),
22 path('core/logs/', views.logs, name="log_list"),
23 path('core/logs/page/', views.logs_page, name="logs_page"),
24 path('core/top_notifications/check/',
25 views.check_top_notifications,
26 name="top_notifications_check"),
27
28 path('user/', views.index, name="user_index"),
29 path('user/preferences/', views.preferences,
30 name="user_preferences"),
31 path('user/profile/', views.profile, name="user_profile"),
32 path('user/api/', views.api_access, name="user_api_access"),
33 path('user/security/', views.security, name="user_security"),
34 ]
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modoboa/core/urls.py b/modoboa/core/urls.py
--- a/modoboa/core/urls.py
+++ b/modoboa/core/urls.py
@@ -1,6 +1,7 @@
"""Core urls."""
from django.urls import path
+from django.views.generic.base import TemplateView
from . import views
@@ -31,4 +32,5 @@
path('user/profile/', views.profile, name="user_profile"),
path('user/api/', views.api_access, name="user_api_access"),
path('user/security/', views.security, name="user_security"),
+ path('robots.txt', TemplateView.as_view(template_name="core/robots.txt", content_type="text/plain")),
]
| {"golden_diff": "diff --git a/modoboa/core/urls.py b/modoboa/core/urls.py\n--- a/modoboa/core/urls.py\n+++ b/modoboa/core/urls.py\n@@ -1,6 +1,7 @@\n \"\"\"Core urls.\"\"\"\n \n from django.urls import path\n+from django.views.generic.base import TemplateView\n \n from . import views\n \n@@ -31,4 +32,5 @@\n path('user/profile/', views.profile, name=\"user_profile\"),\n path('user/api/', views.api_access, name=\"user_api_access\"),\n path('user/security/', views.security, name=\"user_security\"),\n+ path('robots.txt', TemplateView.as_view(template_name=\"core/robots.txt\", content_type=\"text/plain\")),\n ]\n", "issue": "Robots.txt is missing from urls.py\n# Impacted versions\r\n\r\n* Modoboa: 1.12.2 and older\r\n* installer used: Yes, but some modifications made\r\n* Webserver: Nginx\r\n\r\n# Steps to reproduce\r\nInstall modoboa and enable webinterface.\r\n\r\n# Current behavior\r\nNo robots.txt is defined. Search engines do not now how to index the website. When search engines try to find robots.txt an 404 is raised and the error is mailed to ADMINS (if configured)\r\n\r\n# Expected behavior\r\nRobots.txt in urls.py defined, to deny all traffic, as webmail should not be publicly indexed by search engines. Possible fix, add:\r\n`path('robots.txt', lambda r: HttpResponse(\"User-agent: *\\nDisAllow: /\", content_type=\"text/plain\"), name='robots')`\r\n\r\n# Video/Screenshot link (optional)\r\n\r\n\n", "before_files": [{"content": "\"\"\"Core urls.\"\"\"\n\nfrom django.urls import path\n\nfrom . import views\n\napp_name = \"core\"\n\nurlpatterns = [\n path('', views.RootDispatchView.as_view(), name=\"root\"),\n path('dashboard/', views.DashboardView.as_view(), name=\"dashboard\"),\n\n path('accounts/login/', views.dologin, name=\"login\"),\n path('accounts/logout/', views.dologout, name=\"logout\"),\n path('accounts/2fa_verify/',\n views.TwoFactorCodeVerifyView.as_view(),\n name='2fa_verify'),\n\n path('core/', views.viewsettings, name=\"index\"),\n path('core/parameters/', views.parameters, name=\"parameters\"),\n path('core/info/', views.information, name=\"information\"),\n path('core/logs/', views.logs, name=\"log_list\"),\n path('core/logs/page/', views.logs_page, name=\"logs_page\"),\n path('core/top_notifications/check/',\n views.check_top_notifications,\n name=\"top_notifications_check\"),\n\n path('user/', views.index, name=\"user_index\"),\n path('user/preferences/', views.preferences,\n name=\"user_preferences\"),\n path('user/profile/', views.profile, name=\"user_profile\"),\n path('user/api/', views.api_access, name=\"user_api_access\"),\n path('user/security/', views.security, name=\"user_security\"),\n]\n", "path": "modoboa/core/urls.py"}], "after_files": [{"content": "\"\"\"Core urls.\"\"\"\n\nfrom django.urls import path\nfrom django.views.generic.base import TemplateView\n\nfrom . 
import views\n\napp_name = \"core\"\n\nurlpatterns = [\n path('', views.RootDispatchView.as_view(), name=\"root\"),\n path('dashboard/', views.DashboardView.as_view(), name=\"dashboard\"),\n\n path('accounts/login/', views.dologin, name=\"login\"),\n path('accounts/logout/', views.dologout, name=\"logout\"),\n path('accounts/2fa_verify/',\n views.TwoFactorCodeVerifyView.as_view(),\n name='2fa_verify'),\n\n path('core/', views.viewsettings, name=\"index\"),\n path('core/parameters/', views.parameters, name=\"parameters\"),\n path('core/info/', views.information, name=\"information\"),\n path('core/logs/', views.logs, name=\"log_list\"),\n path('core/logs/page/', views.logs_page, name=\"logs_page\"),\n path('core/top_notifications/check/',\n views.check_top_notifications,\n name=\"top_notifications_check\"),\n\n path('user/', views.index, name=\"user_index\"),\n path('user/preferences/', views.preferences,\n name=\"user_preferences\"),\n path('user/profile/', views.profile, name=\"user_profile\"),\n path('user/api/', views.api_access, name=\"user_api_access\"),\n path('user/security/', views.security, name=\"user_security\"),\n path('robots.txt', TemplateView.as_view(template_name=\"core/robots.txt\", content_type=\"text/plain\")),\n]\n", "path": "modoboa/core/urls.py"}]} | 792 | 158 |
gh_patches_debug_3849 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2037 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Celery is only using low priority queue
I'm not sure if this is happening for everyone or just bookwyrm.social, but all my celery tasks are going to the `low_priority` queue and it's making everything run super slowly!
(@tofuwabohu are you noticing this in flower?)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celerywyrm/settings.py`
Content:
```
1 """ bookwyrm settings and configuration """
2 # pylint: disable=wildcard-import
3 # pylint: disable=unused-wildcard-import
4 from bookwyrm.settings import *
5
6 # pylint: disable=line-too-long
7 REDIS_BROKER_PASSWORD = requests.utils.quote(env("REDIS_BROKER_PASSWORD", None))
8 REDIS_BROKER_HOST = env("REDIS_BROKER_HOST", "redis_broker")
9 REDIS_BROKER_PORT = env("REDIS_BROKER_PORT", 6379)
10 REDIS_BROKER_DB_INDEX = env("REDIS_BROKER_DB_INDEX", 0)
11
12 CELERY_BROKER_URL = f"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}"
13 CELERY_RESULT_BACKEND = f"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}"
14
15 CELERY_DEFAULT_QUEUE = "low_priority"
16
17 CELERY_ACCEPT_CONTENT = ["json"]
18 CELERY_TASK_SERIALIZER = "json"
19 CELERY_RESULT_SERIALIZER = "json"
20
21 CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
22 CELERY_TIMEZONE = env("TIME_ZONE", "UTC")
23
24 FLOWER_PORT = env("FLOWER_PORT")
25
26 INSTALLED_APPS = INSTALLED_APPS + [
27 "celerywyrm",
28 ]
29
30 ROOT_URLCONF = "celerywyrm.urls"
31
32 WSGI_APPLICATION = "celerywyrm.wsgi.application"
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/celerywyrm/settings.py b/celerywyrm/settings.py
--- a/celerywyrm/settings.py
+++ b/celerywyrm/settings.py
@@ -13,6 +13,7 @@
CELERY_RESULT_BACKEND = f"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}"
CELERY_DEFAULT_QUEUE = "low_priority"
+CELERY_CREATE_MISSING_QUEUES = True
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_TASK_SERIALIZER = "json"
| {"golden_diff": "diff --git a/celerywyrm/settings.py b/celerywyrm/settings.py\n--- a/celerywyrm/settings.py\n+++ b/celerywyrm/settings.py\n@@ -13,6 +13,7 @@\n CELERY_RESULT_BACKEND = f\"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}\"\n \n CELERY_DEFAULT_QUEUE = \"low_priority\"\n+CELERY_CREATE_MISSING_QUEUES = True\n \n CELERY_ACCEPT_CONTENT = [\"json\"]\n CELERY_TASK_SERIALIZER = \"json\"\n", "issue": "Celery is only using low priority queue\nI'm not sure if this is happening for everyone or just bookwyrm.social, but all my celery tasks are going to the `low_priority` queue and it's making everything run super slowly!\r\n\r\n(@tofuwabohu are you noticing this in flower?)\n", "before_files": [{"content": "\"\"\" bookwyrm settings and configuration \"\"\"\n# pylint: disable=wildcard-import\n# pylint: disable=unused-wildcard-import\nfrom bookwyrm.settings import *\n\n# pylint: disable=line-too-long\nREDIS_BROKER_PASSWORD = requests.utils.quote(env(\"REDIS_BROKER_PASSWORD\", None))\nREDIS_BROKER_HOST = env(\"REDIS_BROKER_HOST\", \"redis_broker\")\nREDIS_BROKER_PORT = env(\"REDIS_BROKER_PORT\", 6379)\nREDIS_BROKER_DB_INDEX = env(\"REDIS_BROKER_DB_INDEX\", 0)\n\nCELERY_BROKER_URL = f\"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}\"\nCELERY_RESULT_BACKEND = f\"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}\"\n\nCELERY_DEFAULT_QUEUE = \"low_priority\"\n\nCELERY_ACCEPT_CONTENT = [\"json\"]\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_RESULT_SERIALIZER = \"json\"\n\nCELERY_BEAT_SCHEDULER = \"django_celery_beat.schedulers:DatabaseScheduler\"\nCELERY_TIMEZONE = env(\"TIME_ZONE\", \"UTC\")\n\nFLOWER_PORT = env(\"FLOWER_PORT\")\n\nINSTALLED_APPS = INSTALLED_APPS + [\n \"celerywyrm\",\n]\n\nROOT_URLCONF = \"celerywyrm.urls\"\n\nWSGI_APPLICATION = \"celerywyrm.wsgi.application\"\n", "path": "celerywyrm/settings.py"}], "after_files": [{"content": "\"\"\" bookwyrm settings and configuration \"\"\"\n# pylint: disable=wildcard-import\n# pylint: disable=unused-wildcard-import\nfrom bookwyrm.settings import *\n\n# pylint: disable=line-too-long\nREDIS_BROKER_PASSWORD = requests.utils.quote(env(\"REDIS_BROKER_PASSWORD\", None))\nREDIS_BROKER_HOST = env(\"REDIS_BROKER_HOST\", \"redis_broker\")\nREDIS_BROKER_PORT = env(\"REDIS_BROKER_PORT\", 6379)\nREDIS_BROKER_DB_INDEX = env(\"REDIS_BROKER_DB_INDEX\", 0)\n\nCELERY_BROKER_URL = f\"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}\"\nCELERY_RESULT_BACKEND = f\"redis://:{REDIS_BROKER_PASSWORD}@{REDIS_BROKER_HOST}:{REDIS_BROKER_PORT}/{REDIS_BROKER_DB_INDEX}\"\n\nCELERY_DEFAULT_QUEUE = \"low_priority\"\nCELERY_CREATE_MISSING_QUEUES = True\n\nCELERY_ACCEPT_CONTENT = [\"json\"]\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_RESULT_SERIALIZER = \"json\"\n\nCELERY_BEAT_SCHEDULER = \"django_celery_beat.schedulers:DatabaseScheduler\"\nCELERY_TIMEZONE = env(\"TIME_ZONE\", \"UTC\")\n\nFLOWER_PORT = env(\"FLOWER_PORT\")\n\nINSTALLED_APPS = INSTALLED_APPS + [\n \"celerywyrm\",\n]\n\nROOT_URLCONF = \"celerywyrm.urls\"\n\nWSGI_APPLICATION = \"celerywyrm.wsgi.application\"\n", "path": "celerywyrm/settings.py"}]} | 709 | 127 |
gh_patches_debug_48127 | rasdani/github-patches | git_diff | dynaconf__dynaconf-1010 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] TypeError for older versions of HVAC in read_secret_version method
**Describe the bug**
A combination of newer versions of Dynaconf with older versions of HVAC result in an incompatible mix of expected vs available arguments. Specifically you can get the following traceback.
```python
109 try:
110 if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:
--> 111 data = client.secrets.kv.v2.read_secret_version(
112 path,
113 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
114 raise_on_deleted_version=True, # keep default behavior
115 )
116 else:
117 data = client.secrets.kv.read_secret(
118 "data/" + path,
119 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
120 )
TypeError: KvV2.read_secret_version() got an unexpected keyword argument 'raise_on_deleted_version'
```
The PR introducing this feature was included in HVAC 1.1.0: https://github.com/hvac/hvac/pull/907
**To Reproduce**
Steps to reproduce the behavior:
1. Have a version of HVAC older than 1.1.0
2. Trigger a vault version read
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from __future__ import annotations
2
3 import os
4
5 from setuptools import find_packages
6 from setuptools import setup
7
8
9 def read(*names, **kwargs):
10 """Read a file."""
11 content = ""
12 with open(
13 os.path.join(os.path.dirname(__file__), *names),
14 encoding=kwargs.get("encoding", "utf8"),
15 ) as open_file:
16 content = open_file.read().strip()
17 return content
18
19
20 test_requirements = [
21 "pytest",
22 "pytest-cov",
23 "pytest-xdist",
24 "pytest-mock",
25 "flake8",
26 "pep8-naming",
27 "flake8-debugger",
28 "flake8-print",
29 "flake8-todo",
30 "radon",
31 "flask>=0.12",
32 "django",
33 "python-dotenv",
34 "toml",
35 "redis",
36 "hvac",
37 "configobj",
38 ]
39
40
41 setup(
42 name="dynaconf",
43 version=read("dynaconf", "VERSION"),
44 url="https://github.com/dynaconf/dynaconf",
45 license="MIT",
46 license_files=["LICENSE", "vendor_licenses/*"],
47 author="Bruno Rocha",
48 author_email="[email protected]",
49 description="The dynamic configurator for your Python Project",
50 long_description=read("README.md"),
51 long_description_content_type="text/markdown",
52 packages=find_packages(
53 exclude=[
54 "tests",
55 "tests.*",
56 "tests_functional",
57 "tests_functional.*",
58 "docs",
59 "legacy_docs",
60 "legacy_docs.*",
61 "docs.*",
62 "build",
63 "build.*",
64 "dynaconf.vendor_src",
65 "dynaconf/vendor_src",
66 "dynaconf.vendor_src.*",
67 "dynaconf/vendor_src/*",
68 ]
69 ),
70 include_package_data=True,
71 zip_safe=False,
72 platforms="any",
73 tests_require=test_requirements,
74 extras_require={
75 "redis": ["redis"],
76 "vault": ["hvac"],
77 "yaml": ["ruamel.yaml"],
78 "toml": ["toml"],
79 "ini": ["configobj"],
80 "configobj": ["configobj"],
81 "all": ["redis", "ruamel.yaml", "configobj", "hvac"],
82 "test": test_requirements,
83 },
84 python_requires=">=3.8",
85 entry_points={"console_scripts": ["dynaconf=dynaconf.cli:main"]},
86 setup_requires=["setuptools>=38.6.0"],
87 classifiers=[
88 "Development Status :: 5 - Production/Stable",
89 "Framework :: Django",
90 "Framework :: Flask",
91 "Intended Audience :: Developers",
92 "License :: OSI Approved :: MIT License",
93 "Natural Language :: English",
94 "Operating System :: OS Independent",
95 "Programming Language :: Python",
96 "Programming Language :: Python :: 3",
97 "Programming Language :: Python :: 3 :: Only",
98 "Programming Language :: Python :: 3.8",
99 "Programming Language :: Python :: 3.9",
100 "Programming Language :: Python :: 3.10",
101 "Programming Language :: Python :: 3.11",
102 "Topic :: Utilities",
103 "Topic :: Software Development :: Libraries",
104 "Topic :: Software Development :: Libraries :: Python Modules",
105 ],
106 )
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
"python-dotenv",
"toml",
"redis",
- "hvac",
+ "hvac>=1.1.0",
"configobj",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n \"python-dotenv\",\n \"toml\",\n \"redis\",\n- \"hvac\",\n+ \"hvac>=1.1.0\",\n \"configobj\",\n ]\n", "issue": "[bug] TypeError for older versions of HVAC in read_secret_version method\n**Describe the bug**\r\nA combination of newer versions of Dynaconf with older versions of HVAC result in an incompatible mix of expected vs available arguments. Specifically you can get the following traceback.\r\n\r\n```python\r\n 109 try:\r\n 110 if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:\r\n--> 111 data = client.secrets.kv.v2.read_secret_version(\r\n 112 path,\r\n 113 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\r\n 114 raise_on_deleted_version=True, # keep default behavior\r\n 115 )\r\n 116 else:\r\n 117 data = client.secrets.kv.read_secret(\r\n 118 \"data/\" + path,\r\n 119 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\r\n 120 )\r\n\r\nTypeError: KvV2.read_secret_version() got an unexpected keyword argument 'raise_on_deleted_version'\r\n```\r\n\r\nThe PR introducing this feature was included in HVAC 1.1.0: https://github.com/hvac/hvac/pull/907 \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Have a version of HVAC older than 1.1.0\r\n2. Trigger a vault version read\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef read(*names, **kwargs):\n \"\"\"Read a file.\"\"\"\n content = \"\"\n with open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\"),\n ) as open_file:\n content = open_file.read().strip()\n return content\n\n\ntest_requirements = [\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-mock\",\n \"flake8\",\n \"pep8-naming\",\n \"flake8-debugger\",\n \"flake8-print\",\n \"flake8-todo\",\n \"radon\",\n \"flask>=0.12\",\n \"django\",\n \"python-dotenv\",\n \"toml\",\n \"redis\",\n \"hvac\",\n \"configobj\",\n]\n\n\nsetup(\n name=\"dynaconf\",\n version=read(\"dynaconf\", \"VERSION\"),\n url=\"https://github.com/dynaconf/dynaconf\",\n license=\"MIT\",\n license_files=[\"LICENSE\", \"vendor_licenses/*\"],\n author=\"Bruno Rocha\",\n author_email=\"[email protected]\",\n description=\"The dynamic configurator for your Python Project\",\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\n \"tests\",\n \"tests.*\",\n \"tests_functional\",\n \"tests_functional.*\",\n \"docs\",\n \"legacy_docs\",\n \"legacy_docs.*\",\n \"docs.*\",\n \"build\",\n \"build.*\",\n \"dynaconf.vendor_src\",\n \"dynaconf/vendor_src\",\n \"dynaconf.vendor_src.*\",\n \"dynaconf/vendor_src/*\",\n ]\n ),\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n tests_require=test_requirements,\n extras_require={\n \"redis\": [\"redis\"],\n \"vault\": [\"hvac\"],\n \"yaml\": [\"ruamel.yaml\"],\n \"toml\": [\"toml\"],\n \"ini\": [\"configobj\"],\n \"configobj\": [\"configobj\"],\n \"all\": [\"redis\", \"ruamel.yaml\", \"configobj\", \"hvac\"],\n \"test\": test_requirements,\n },\n python_requires=\">=3.8\",\n entry_points={\"console_scripts\": [\"dynaconf=dynaconf.cli:main\"]},\n setup_requires=[\"setuptools>=38.6.0\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Django\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural 
Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Utilities\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef read(*names, **kwargs):\n \"\"\"Read a file.\"\"\"\n content = \"\"\n with open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\"),\n ) as open_file:\n content = open_file.read().strip()\n return content\n\n\ntest_requirements = [\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-mock\",\n \"flake8\",\n \"pep8-naming\",\n \"flake8-debugger\",\n \"flake8-print\",\n \"flake8-todo\",\n \"radon\",\n \"flask>=0.12\",\n \"django\",\n \"python-dotenv\",\n \"toml\",\n \"redis\",\n \"hvac>=1.1.0\",\n \"configobj\",\n]\n\n\nsetup(\n name=\"dynaconf\",\n version=read(\"dynaconf\", \"VERSION\"),\n url=\"https://github.com/dynaconf/dynaconf\",\n license=\"MIT\",\n license_files=[\"LICENSE\", \"vendor_licenses/*\"],\n author=\"Bruno Rocha\",\n author_email=\"[email protected]\",\n description=\"The dynamic configurator for your Python Project\",\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\n \"tests\",\n \"tests.*\",\n \"tests_functional\",\n \"tests_functional.*\",\n \"docs\",\n \"legacy_docs\",\n \"legacy_docs.*\",\n \"docs.*\",\n \"build\",\n \"build.*\",\n \"dynaconf.vendor_src\",\n \"dynaconf/vendor_src\",\n \"dynaconf.vendor_src.*\",\n \"dynaconf/vendor_src/*\",\n ]\n ),\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n tests_require=test_requirements,\n extras_require={\n \"redis\": [\"redis\"],\n \"vault\": [\"hvac\"],\n \"yaml\": [\"ruamel.yaml\"],\n \"toml\": [\"toml\"],\n \"ini\": [\"configobj\"],\n \"configobj\": [\"configobj\"],\n \"all\": [\"redis\", \"ruamel.yaml\", \"configobj\", \"hvac\"],\n \"test\": test_requirements,\n },\n python_requires=\">=3.8\",\n entry_points={\"console_scripts\": [\"dynaconf=dynaconf.cli:main\"]},\n setup_requires=[\"setuptools>=38.6.0\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Django\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Utilities\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}]} | 1,496 | 71 |
gh_patches_debug_23445 | rasdani/github-patches | git_diff | liqd__a4-opin-689 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invite: email address should be independent of letter case
After testing invites for private projects a lot with AEGEE, I finally found out what their problem was. When they invite users, the auto correct on their Android tablet lets email addresses start with an uppercase letter. The users they wanted to invite had their email address written in lowercase letters though. OPIN did not recognize them as the same users. We should change this behaviour ASAP. It should not matter anywhere whether a user inputs email addresses in lower or uppercase letters.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/memberships/views.py`
Content:
```
1 from django.http import Http404
2 from django.shortcuts import redirect
3 from django.views import generic
4 from rules.compat import access_mixins as mixin
5
6 from adhocracy4.projects import models as prj_models
7 from adhocracy4.projects import views as prj_views
8
9 from . import forms, models
10
11
12 class RequestsProjectDetailView(prj_views.ProjectDetailView):
13
14 def handle_no_permission(self):
15 """
16 Check if user clould join
17 """
18 user = self.request.user
19 is_member = user.is_authenticated() and self.project.has_member(user)
20
21 if is_member:
22 return super().handle_no_permission()
23 else:
24 return self.handle_no_membership()
25
26 def handle_no_membership(self):
27 membership_impossible = (
28 not self.request.user.is_authenticated()
29 or self.project.is_draft
30 or self.project.has_member(self.request.user)
31 )
32
33 if membership_impossible:
34 return super().handle_no_permission()
35 else:
36 return redirect('memberships-request',
37 project_slug=self.project.slug)
38
39
40 class InviteView(mixin.LoginRequiredMixin, generic.UpdateView):
41 model = models.Invite
42 form_class = forms.InviteForm
43 slug_field = 'token'
44 slug_url_kwarg = 'invite_token'
45
46 def get_form_kwargs(self):
47 kwargs = super().get_form_kwargs()
48 kwargs.update({'user': self.request.user})
49 return kwargs
50
51 def form_valid(self, form):
52 if form.is_accepted():
53 form.instance.accept(self.request.user)
54 return redirect(form.instance.project.get_absolute_url())
55 else:
56 form.instance.reject()
57 return redirect('/')
58
59
60 class RequestView(mixin.LoginRequiredMixin, generic.DetailView):
61 """
62 Displays membership request if it exists or allows to create one.
63 """
64 model = models.Request
65 slug_field = 'project__slug'
66 slug_url_kwarg = 'project_slug'
67 context_object_name = 'join_request'
68
69 def get_queryset(self):
70 return self.model.objects.filter(creator=self.request.user)
71
72 def get(self, request, *args, **kwargs):
73 if self.project.has_member(request.user):
74 return redirect(self.project.get_absolute_url())
75 else:
76 return super().get(request, *args, **kwargs)
77
78 def post(self, request, *args, **kwargs):
79 user = request.user
80 project = self.project
81 models.Request.objects.request_membership(project, user)
82 return redirect(self.request.path)
83
84 def get_object(self, queryset=None):
85 try:
86 return super().get_object(queryset)
87 except Http404:
88 return None
89
90 @property
91 def project(self):
92 project_slug = self.kwargs[self.slug_url_kwarg]
93 return prj_models.Project.objects.get(slug=project_slug)
94
```
Path: `euth/memberships/forms.py`
Content:
```
1 from django import forms
2 from django.core.exceptions import ValidationError
3
4 from . import models
5
6
7 class InviteForm(forms.ModelForm):
8 accept = forms.CharField(required=False)
9 reject = forms.CharField(required=False)
10
11 class Meta:
12 model = models.Invite
13 fields = ['accept', 'reject']
14
15 def __init__(self, user=None, **kwargs):
16 super().__init__(**kwargs)
17 self.user = user
18
19 def clean(self):
20 data = self.data
21 if 'accept' not in data and 'reject' not in data:
22 raise ValidationError('Reject or accept')
23 if 'accept' in data and not self.user.email == self.instance.email:
24 raise ValidationError('This user has another email address than '
25 'the one that received the invitation.')
26 return data
27
28 def is_accepted(self):
29 data = self.data
30 return 'accept' in data and 'reject' not in data
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py
--- a/euth/memberships/forms.py
+++ b/euth/memberships/forms.py
@@ -12,17 +12,10 @@
model = models.Invite
fields = ['accept', 'reject']
- def __init__(self, user=None, **kwargs):
- super().__init__(**kwargs)
- self.user = user
-
def clean(self):
data = self.data
if 'accept' not in data and 'reject' not in data:
raise ValidationError('Reject or accept')
- if 'accept' in data and not self.user.email == self.instance.email:
- raise ValidationError('This user has another email address than '
- 'the one that received the invitation.')
return data
def is_accepted(self):
diff --git a/euth/memberships/views.py b/euth/memberships/views.py
--- a/euth/memberships/views.py
+++ b/euth/memberships/views.py
@@ -43,11 +43,6 @@
slug_field = 'token'
slug_url_kwarg = 'invite_token'
- def get_form_kwargs(self):
- kwargs = super().get_form_kwargs()
- kwargs.update({'user': self.request.user})
- return kwargs
-
def form_valid(self, form):
if form.is_accepted():
form.instance.accept(self.request.user)
| {"golden_diff": "diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py\n--- a/euth/memberships/forms.py\n+++ b/euth/memberships/forms.py\n@@ -12,17 +12,10 @@\n model = models.Invite\n fields = ['accept', 'reject']\n \n- def __init__(self, user=None, **kwargs):\n- super().__init__(**kwargs)\n- self.user = user\n-\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n- if 'accept' in data and not self.user.email == self.instance.email:\n- raise ValidationError('This user has another email address than '\n- 'the one that received the invitation.')\n return data\n \n def is_accepted(self):\ndiff --git a/euth/memberships/views.py b/euth/memberships/views.py\n--- a/euth/memberships/views.py\n+++ b/euth/memberships/views.py\n@@ -43,11 +43,6 @@\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n \n- def get_form_kwargs(self):\n- kwargs = super().get_form_kwargs()\n- kwargs.update({'user': self.request.user})\n- return kwargs\n-\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n", "issue": "Invite: email address should be independent of letter case \nAfter testing invites for private projects a lot with AEGEE, I finally found out what their problem was. When they invite users, the auto correct on their Android tablet lets email addresses start with an uppercase letter. The users they wanted to invite had their email address written in lowercase letters though. OPIN did not recognize them as the same users. We should change this behaviour ASAP. It should not matter anywhere whether a user inputs email addresses in lower or uppercase letters.\n", "before_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixin\n\nfrom adhocracy4.projects import models as prj_models\nfrom adhocracy4.projects import views as prj_views\n\nfrom . 
import forms, models\n\n\nclass RequestsProjectDetailView(prj_views.ProjectDetailView):\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n user = self.request.user\n is_member = user.is_authenticated() and self.project.has_member(user)\n\n if is_member:\n return super().handle_no_permission()\n else:\n return self.handle_no_membership()\n\n def handle_no_membership(self):\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n\nclass InviteView(mixin.LoginRequiredMixin, generic.UpdateView):\n model = models.Invite\n form_class = forms.InviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass RequestView(mixin.LoginRequiredMixin, generic.DetailView):\n \"\"\"\n Displays membership request if it exists or allows to create one.\n \"\"\"\n model = models.Request\n slug_field = 'project__slug'\n slug_url_kwarg = 'project_slug'\n context_object_name = 'join_request'\n\n def get_queryset(self):\n return self.model.objects.filter(creator=self.request.user)\n\n def get(self, request, *args, **kwargs):\n if self.project.has_member(request.user):\n return redirect(self.project.get_absolute_url())\n else:\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n user = request.user\n project = self.project\n models.Request.objects.request_membership(project, user)\n return redirect(self.request.path)\n\n def get_object(self, queryset=None):\n try:\n return super().get_object(queryset)\n except Http404:\n return None\n\n @property\n def project(self):\n project_slug = self.kwargs[self.slug_url_kwarg]\n return prj_models.Project.objects.get(slug=project_slug)\n", "path": "euth/memberships/views.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom . import models\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n class Meta:\n model = models.Invite\n fields = ['accept', 'reject']\n\n def __init__(self, user=None, **kwargs):\n super().__init__(**kwargs)\n self.user = user\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n if 'accept' in data and not self.user.email == self.instance.email:\n raise ValidationError('This user has another email address than '\n 'the one that received the invitation.')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n", "path": "euth/memberships/forms.py"}], "after_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixin\n\nfrom adhocracy4.projects import models as prj_models\nfrom adhocracy4.projects import views as prj_views\n\nfrom . 
import forms, models\n\n\nclass RequestsProjectDetailView(prj_views.ProjectDetailView):\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n user = self.request.user\n is_member = user.is_authenticated() and self.project.has_member(user)\n\n if is_member:\n return super().handle_no_permission()\n else:\n return self.handle_no_membership()\n\n def handle_no_membership(self):\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n\nclass InviteView(mixin.LoginRequiredMixin, generic.UpdateView):\n model = models.Invite\n form_class = forms.InviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass RequestView(mixin.LoginRequiredMixin, generic.DetailView):\n \"\"\"\n Displays membership request if it exists or allows to create one.\n \"\"\"\n model = models.Request\n slug_field = 'project__slug'\n slug_url_kwarg = 'project_slug'\n context_object_name = 'join_request'\n\n def get_queryset(self):\n return self.model.objects.filter(creator=self.request.user)\n\n def get(self, request, *args, **kwargs):\n if self.project.has_member(request.user):\n return redirect(self.project.get_absolute_url())\n else:\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n user = request.user\n project = self.project\n models.Request.objects.request_membership(project, user)\n return redirect(self.request.path)\n\n def get_object(self, queryset=None):\n try:\n return super().get_object(queryset)\n except Http404:\n return None\n\n @property\n def project(self):\n project_slug = self.kwargs[self.slug_url_kwarg]\n return prj_models.Project.objects.get(slug=project_slug)\n", "path": "euth/memberships/views.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom . import models\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n class Meta:\n model = models.Invite\n fields = ['accept', 'reject']\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n", "path": "euth/memberships/forms.py"}]} | 1,397 | 316 |
gh_patches_debug_13444 | rasdani/github-patches | git_diff | iterative__dvc-5425 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tests: exp executor teardown is flaky on windows
Looks like there is some race condition on windows that sometimes happens between cleaning up the test `tmp_dir` and cleaning up the experiments executor temp directory (which is placed in `tmp_dir/.dvc/tmp/...`). May be better to go back to running experiments in system `$TEMP` instead of `.dvc/tmp` (for win tests only)?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/repo/experiments/executor/local.py`
Content:
```
1 import logging
2 import os
3 import sys
4 from tempfile import TemporaryDirectory
5 from typing import Optional
6
7 from dvc.utils.fs import remove
8
9 from .base import BaseExecutor
10
11 logger = logging.getLogger(__name__)
12
13
14 class BaseLocalExecutor(BaseExecutor):
15 """Base local machine executor."""
16
17 @property
18 def git_url(self) -> str:
19 root_dir = os.path.abspath(self.root_dir)
20 if os.name == "nt":
21 root_dir = root_dir.replace(os.sep, "/")
22 return f"file://{root_dir}"
23
24
25 class TempDirExecutor(BaseLocalExecutor):
26 """Temp directory experiment executor."""
27
28 # Temp dir executors should warn if untracked files exist (to help with
29 # debugging user code), and suppress other DVC hints (like `git add`
30 # suggestions) that are not applicable outside of workspace runs
31 WARN_UNTRACKED = True
32 QUIET = True
33
34 def __init__(
35 self,
36 *args,
37 tmp_dir: Optional[str] = None,
38 cache_dir: Optional[str] = None,
39 **kwargs,
40 ):
41 self._tmp_dir = TemporaryDirectory(dir=tmp_dir)
42 kwargs["root_dir"] = self._tmp_dir.name
43 super().__init__(*args, **kwargs)
44 if cache_dir:
45 self._config(cache_dir)
46 logger.debug(
47 "Init temp dir executor in dir '%s'", self._tmp_dir,
48 )
49
50 def _config(self, cache_dir):
51 local_config = os.path.join(self.dvc_dir, "config.local")
52 logger.debug("Writing experiments local config '%s'", local_config)
53 with open(local_config, "w") as fobj:
54 fobj.write(f"[cache]\n dir = {cache_dir}")
55
56 def cleanup(self):
57 super().cleanup()
58 logger.debug("Removing tmpdir '%s'", self._tmp_dir)
59 try:
60 self._tmp_dir.cleanup()
61 except PermissionError:
62 if os.name == "nt" and sys.version_info < (3, 8):
63 # see https://bugs.python.org/issue26660
64 remove(self._tmp_dir.name)
65 return
66 raise
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/repo/experiments/executor/local.py b/dvc/repo/experiments/executor/local.py
--- a/dvc/repo/experiments/executor/local.py
+++ b/dvc/repo/experiments/executor/local.py
@@ -1,6 +1,5 @@
import logging
import os
-import sys
from tempfile import TemporaryDirectory
from typing import Optional
@@ -56,11 +55,4 @@
def cleanup(self):
super().cleanup()
logger.debug("Removing tmpdir '%s'", self._tmp_dir)
- try:
- self._tmp_dir.cleanup()
- except PermissionError:
- if os.name == "nt" and sys.version_info < (3, 8):
- # see https://bugs.python.org/issue26660
- remove(self._tmp_dir.name)
- return
- raise
+ remove(self._tmp_dir.name)
| {"golden_diff": "diff --git a/dvc/repo/experiments/executor/local.py b/dvc/repo/experiments/executor/local.py\n--- a/dvc/repo/experiments/executor/local.py\n+++ b/dvc/repo/experiments/executor/local.py\n@@ -1,6 +1,5 @@\n import logging\n import os\n-import sys\n from tempfile import TemporaryDirectory\n from typing import Optional\n \n@@ -56,11 +55,4 @@\n def cleanup(self):\n super().cleanup()\n logger.debug(\"Removing tmpdir '%s'\", self._tmp_dir)\n- try:\n- self._tmp_dir.cleanup()\n- except PermissionError:\n- if os.name == \"nt\" and sys.version_info < (3, 8):\n- # see https://bugs.python.org/issue26660\n- remove(self._tmp_dir.name)\n- return\n- raise\n+ remove(self._tmp_dir.name)\n", "issue": "tests: exp executor teardown is flaky on windows\nLooks like there is some race condition on windows that sometimes happens between cleaning up the test `tmp_dir` and cleaning up the experiments executor temp directory (which is placed in `tmp_dir/.dvc/tmp/...`). May be better to go back to running experiments in system `$TEMP` instead of `.dvc/tmp` (for win tests only)?\n", "before_files": [{"content": "import logging\nimport os\nimport sys\nfrom tempfile import TemporaryDirectory\nfrom typing import Optional\n\nfrom dvc.utils.fs import remove\n\nfrom .base import BaseExecutor\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseLocalExecutor(BaseExecutor):\n \"\"\"Base local machine executor.\"\"\"\n\n @property\n def git_url(self) -> str:\n root_dir = os.path.abspath(self.root_dir)\n if os.name == \"nt\":\n root_dir = root_dir.replace(os.sep, \"/\")\n return f\"file://{root_dir}\"\n\n\nclass TempDirExecutor(BaseLocalExecutor):\n \"\"\"Temp directory experiment executor.\"\"\"\n\n # Temp dir executors should warn if untracked files exist (to help with\n # debugging user code), and suppress other DVC hints (like `git add`\n # suggestions) that are not applicable outside of workspace runs\n WARN_UNTRACKED = True\n QUIET = True\n\n def __init__(\n self,\n *args,\n tmp_dir: Optional[str] = None,\n cache_dir: Optional[str] = None,\n **kwargs,\n ):\n self._tmp_dir = TemporaryDirectory(dir=tmp_dir)\n kwargs[\"root_dir\"] = self._tmp_dir.name\n super().__init__(*args, **kwargs)\n if cache_dir:\n self._config(cache_dir)\n logger.debug(\n \"Init temp dir executor in dir '%s'\", self._tmp_dir,\n )\n\n def _config(self, cache_dir):\n local_config = os.path.join(self.dvc_dir, \"config.local\")\n logger.debug(\"Writing experiments local config '%s'\", local_config)\n with open(local_config, \"w\") as fobj:\n fobj.write(f\"[cache]\\n dir = {cache_dir}\")\n\n def cleanup(self):\n super().cleanup()\n logger.debug(\"Removing tmpdir '%s'\", self._tmp_dir)\n try:\n self._tmp_dir.cleanup()\n except PermissionError:\n if os.name == \"nt\" and sys.version_info < (3, 8):\n # see https://bugs.python.org/issue26660\n remove(self._tmp_dir.name)\n return\n raise\n", "path": "dvc/repo/experiments/executor/local.py"}], "after_files": [{"content": "import logging\nimport os\nfrom tempfile import TemporaryDirectory\nfrom typing import Optional\n\nfrom dvc.utils.fs import remove\n\nfrom .base import BaseExecutor\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseLocalExecutor(BaseExecutor):\n \"\"\"Base local machine executor.\"\"\"\n\n @property\n def git_url(self) -> str:\n root_dir = os.path.abspath(self.root_dir)\n if os.name == \"nt\":\n root_dir = root_dir.replace(os.sep, \"/\")\n return f\"file://{root_dir}\"\n\n\nclass TempDirExecutor(BaseLocalExecutor):\n \"\"\"Temp directory experiment executor.\"\"\"\n\n # 
Temp dir executors should warn if untracked files exist (to help with\n # debugging user code), and suppress other DVC hints (like `git add`\n # suggestions) that are not applicable outside of workspace runs\n WARN_UNTRACKED = True\n QUIET = True\n\n def __init__(\n self,\n *args,\n tmp_dir: Optional[str] = None,\n cache_dir: Optional[str] = None,\n **kwargs,\n ):\n self._tmp_dir = TemporaryDirectory(dir=tmp_dir)\n kwargs[\"root_dir\"] = self._tmp_dir.name\n super().__init__(*args, **kwargs)\n if cache_dir:\n self._config(cache_dir)\n logger.debug(\n \"Init temp dir executor in dir '%s'\", self._tmp_dir,\n )\n\n def _config(self, cache_dir):\n local_config = os.path.join(self.dvc_dir, \"config.local\")\n logger.debug(\"Writing experiments local config '%s'\", local_config)\n with open(local_config, \"w\") as fobj:\n fobj.write(f\"[cache]\\n dir = {cache_dir}\")\n\n def cleanup(self):\n super().cleanup()\n logger.debug(\"Removing tmpdir '%s'\", self._tmp_dir)\n remove(self._tmp_dir.name)\n", "path": "dvc/repo/experiments/executor/local.py"}]} | 934 | 201 |
gh_patches_debug_48687 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-tf-577 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in "onmt-ark-to-records" code
I have found a small bug in the code line referenced below. It causes the script to terminate with a `TypeError: data type not understood`. Just for the sake of completeness, this is caused by the fact that numpy doesn't understand the object `tf.float32`. I changed that to `float` and it worked as it was supposed to. I can create a PR for this, but I suppose it is too trivial to do so and claim a contribution, unless you want me to.
https://github.com/OpenNMT/OpenNMT-tf/blob/5809c293d7bc65d923274cfd56b3339fc4107af6/opennmt/bin/ark_to_records.py#L46
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opennmt/bin/ark_to_records.py`
Content:
```
1 """ARK data file to TFRecords converter.
2
3 The scripts takes the ARK data file and optionally the indexed target text
4 to write aligned source and target data.
5 """
6
7 import argparse
8 import numpy as np
9 import tensorflow as tf
10
11 from opennmt.inputters.record_inputter import write_sequence_record
12
13
14 def consume_next_vector(ark_file):
15 """Consumes the next vector.
16
17 Args:
18 ark_file: The ARK data file.
19
20 Returns:
21 The next vector as a 2D Numpy array.
22 """
23 idx = None
24 vector = []
25
26 for line in ark_file:
27 line = line.strip()
28 fields = line.split()
29
30 if not idx:
31 idx = fields[0]
32 fields.pop(0)
33 fields.pop(0)
34
35 end = fields and fields[-1] == "]"
36
37 if end:
38 fields.pop()
39
40 if fields:
41 vector.append(fields)
42
43 if end:
44 break
45
46 return idx, np.asarray(vector, dtype=tf.float32)
47
48 def consume_next_text(text_file):
49 """Consumes the next text line from `text_file`."""
50 idx = None
51 text = text_file.readline()
52
53 if text:
54 tokens = text.strip().split()
55 idx = tokens[0]
56 tokens.pop(0)
57 text = " ".join(tokens)
58
59 return idx, text
60
61 def write_text(text, writer):
62 """Serializes a line of text."""
63 writer.write(text)
64 writer.write("\n")
65
66 def ark_to_records_aligned(ark_filename, text_filename, out_prefix, compression_type=None):
67 """Converts ARK and text datasets to aligned TFRecords and text datasets."""
68 record_filename = "%s.records" % out_prefix
69 if compression_type == "GZIP":
70 record_filename = "%s.gz" % record_filename
71 record_writer = tf.io.TFRecordWriter(record_filename, options=compression_type)
72 text_writer = open(out_prefix + ".txt", encoding="utf-8", mode="w")
73
74 ark_buffer = {}
75 text_buffer = {}
76 count = 0
77
78 def _write_example(vector, text):
79 write_sequence_record(vector, record_writer)
80 write_text(text, text_writer)
81
82 def _search_aligned():
83 for idx in ark_buffer:
84 if idx in text_buffer:
85 vector = ark_buffer[idx]
86 text = text_buffer[idx]
87
88 del ark_buffer[idx]
89 del text_buffer[idx]
90
91 return vector, text
92
93 return None, None
94
95 with open(ark_filename, encoding="utf-8") as ark_file, open(text_filename, encoding="utf-8") as text_file: #pylint: disable=line-too-long
96 while True:
97 ark_idx, vector = consume_next_vector(ark_file)
98 text_idx, text = consume_next_text(text_file)
99
100 if not ark_idx and not text_idx:
101 # Both files are empty.
102 break
103
104 if ark_idx == text_idx:
105 # If the indices match, write the example.
106 _write_example(vector, text)
107 count += 1
108 else:
109 # Otherwise store the entries.
110 if ark_idx:
111 ark_buffer[ark_idx] = vector
112 if text_idx:
113 text_buffer[text_idx] = text
114
115 # Look if we can now find aligned entries.
116 vector, text = _search_aligned()
117
118 if vector is not None:
119 _write_example(vector, text)
120 count += 1
121
122 # Search alignments in stored entries.
123 while True:
124 vector, text = _search_aligned()
125 if vector is None:
126 break
127 _write_example(vector, text)
128 count += 1
129
130 record_writer.close()
131 text_writer.close()
132
133 print("Saved {} aligned records.".format(count))
134
135 def ark_to_records(ark_filename, out_prefix, compression_type=None):
136 """Converts ARK dataset to TFRecords."""
137 record_writer = tf.io.TFRecordWriter(out_prefix + ".records", options=compression_type)
138 count = 0
139
140 with open(ark_filename, encoding="utf-8") as ark_file:
141 while True:
142 ark_idx, vector = consume_next_vector(ark_file)
143 if not ark_idx:
144 break
145 write_sequence_record(vector, record_writer)
146 count += 1
147
148 record_writer.close()
149 print("Saved {} records.".format(count))
150
151
152 def main():
153 parser = argparse.ArgumentParser()
154 parser.add_argument("--ark", required=True,
155 help="Indexed ARK data file.")
156 parser.add_argument("--txt",
157 help=("Indexed target text data file "
158 "(must set it to align source and target files)."))
159 parser.add_argument("--out", required=True,
160 help="Output files prefix (will be suffixed by .records and .txt).")
161 parser.add_argument("--compression_type", default=None, choices=["GZIP"],
162 help="Optional compression type.")
163 args = parser.parse_args()
164
165 if args.txt:
166 ark_to_records_aligned(args.ark, args.txt, args.out, compression_type=args.compression_type)
167 else:
168 ark_to_records(args.ark, args.out, compression_type=args.compression_type)
169
170 if __name__ == "__main__":
171 main()
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opennmt/bin/ark_to_records.py b/opennmt/bin/ark_to_records.py
--- a/opennmt/bin/ark_to_records.py
+++ b/opennmt/bin/ark_to_records.py
@@ -43,7 +43,7 @@
if end:
break
- return idx, np.asarray(vector, dtype=tf.float32)
+ return idx, np.asarray(vector, dtype=np.float32)
def consume_next_text(text_file):
"""Consumes the next text line from `text_file`."""
| {"golden_diff": "diff --git a/opennmt/bin/ark_to_records.py b/opennmt/bin/ark_to_records.py\n--- a/opennmt/bin/ark_to_records.py\n+++ b/opennmt/bin/ark_to_records.py\n@@ -43,7 +43,7 @@\n if end:\n break\n \n- return idx, np.asarray(vector, dtype=tf.float32)\n+ return idx, np.asarray(vector, dtype=np.float32)\n \n def consume_next_text(text_file):\n \"\"\"Consumes the next text line from `text_file`.\"\"\"\n", "issue": "Bug in \"onmt-ark-to-records\" code\nI have found a small bug in the code line referenced below. It causes the script to terminate with a `TypeError: data type not understood`. Just for the sake of completeness, this is caused by the fact that numpy doesn't understand the object `tf.float32`. I changed that to `float` and it worked as it was supposed to. I can create a PR for this, but I suppose it is too trivial to do so and claim a contribution, unless you want me to.\r\n\r\nhttps://github.com/OpenNMT/OpenNMT-tf/blob/5809c293d7bc65d923274cfd56b3339fc4107af6/opennmt/bin/ark_to_records.py#L46\n", "before_files": [{"content": "\"\"\"ARK data file to TFRecords converter.\n\nThe scripts takes the ARK data file and optionally the indexed target text\nto write aligned source and target data.\n\"\"\"\n\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\nfrom opennmt.inputters.record_inputter import write_sequence_record\n\n\ndef consume_next_vector(ark_file):\n \"\"\"Consumes the next vector.\n\n Args:\n ark_file: The ARK data file.\n\n Returns:\n The next vector as a 2D Numpy array.\n \"\"\"\n idx = None\n vector = []\n\n for line in ark_file:\n line = line.strip()\n fields = line.split()\n\n if not idx:\n idx = fields[0]\n fields.pop(0)\n fields.pop(0)\n\n end = fields and fields[-1] == \"]\"\n\n if end:\n fields.pop()\n\n if fields:\n vector.append(fields)\n\n if end:\n break\n\n return idx, np.asarray(vector, dtype=tf.float32)\n\ndef consume_next_text(text_file):\n \"\"\"Consumes the next text line from `text_file`.\"\"\"\n idx = None\n text = text_file.readline()\n\n if text:\n tokens = text.strip().split()\n idx = tokens[0]\n tokens.pop(0)\n text = \" \".join(tokens)\n\n return idx, text\n\ndef write_text(text, writer):\n \"\"\"Serializes a line of text.\"\"\"\n writer.write(text)\n writer.write(\"\\n\")\n\ndef ark_to_records_aligned(ark_filename, text_filename, out_prefix, compression_type=None):\n \"\"\"Converts ARK and text datasets to aligned TFRecords and text datasets.\"\"\"\n record_filename = \"%s.records\" % out_prefix\n if compression_type == \"GZIP\":\n record_filename = \"%s.gz\" % record_filename\n record_writer = tf.io.TFRecordWriter(record_filename, options=compression_type)\n text_writer = open(out_prefix + \".txt\", encoding=\"utf-8\", mode=\"w\")\n\n ark_buffer = {}\n text_buffer = {}\n count = 0\n\n def _write_example(vector, text):\n write_sequence_record(vector, record_writer)\n write_text(text, text_writer)\n\n def _search_aligned():\n for idx in ark_buffer:\n if idx in text_buffer:\n vector = ark_buffer[idx]\n text = text_buffer[idx]\n\n del ark_buffer[idx]\n del text_buffer[idx]\n\n return vector, text\n\n return None, None\n\n with open(ark_filename, encoding=\"utf-8\") as ark_file, open(text_filename, encoding=\"utf-8\") as text_file: #pylint: disable=line-too-long\n while True:\n ark_idx, vector = consume_next_vector(ark_file)\n text_idx, text = consume_next_text(text_file)\n\n if not ark_idx and not text_idx:\n # Both files are empty.\n break\n\n if ark_idx == text_idx:\n # If the indices match, write the example.\n 
_write_example(vector, text)\n count += 1\n else:\n # Otherwise store the entries.\n if ark_idx:\n ark_buffer[ark_idx] = vector\n if text_idx:\n text_buffer[text_idx] = text\n\n # Look if we can now find aligned entries.\n vector, text = _search_aligned()\n\n if vector is not None:\n _write_example(vector, text)\n count += 1\n\n # Search alignments in stored entries.\n while True:\n vector, text = _search_aligned()\n if vector is None:\n break\n _write_example(vector, text)\n count += 1\n\n record_writer.close()\n text_writer.close()\n\n print(\"Saved {} aligned records.\".format(count))\n\ndef ark_to_records(ark_filename, out_prefix, compression_type=None):\n \"\"\"Converts ARK dataset to TFRecords.\"\"\"\n record_writer = tf.io.TFRecordWriter(out_prefix + \".records\", options=compression_type)\n count = 0\n\n with open(ark_filename, encoding=\"utf-8\") as ark_file:\n while True:\n ark_idx, vector = consume_next_vector(ark_file)\n if not ark_idx:\n break\n write_sequence_record(vector, record_writer)\n count += 1\n\n record_writer.close()\n print(\"Saved {} records.\".format(count))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ark\", required=True,\n help=\"Indexed ARK data file.\")\n parser.add_argument(\"--txt\",\n help=(\"Indexed target text data file \"\n \"(must set it to align source and target files).\"))\n parser.add_argument(\"--out\", required=True,\n help=\"Output files prefix (will be suffixed by .records and .txt).\")\n parser.add_argument(\"--compression_type\", default=None, choices=[\"GZIP\"],\n help=\"Optional compression type.\")\n args = parser.parse_args()\n\n if args.txt:\n ark_to_records_aligned(args.ark, args.txt, args.out, compression_type=args.compression_type)\n else:\n ark_to_records(args.ark, args.out, compression_type=args.compression_type)\n\nif __name__ == \"__main__\":\n main()\n", "path": "opennmt/bin/ark_to_records.py"}], "after_files": [{"content": "\"\"\"ARK data file to TFRecords converter.\n\nThe scripts takes the ARK data file and optionally the indexed target text\nto write aligned source and target data.\n\"\"\"\n\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\nfrom opennmt.inputters.record_inputter import write_sequence_record\n\n\ndef consume_next_vector(ark_file):\n \"\"\"Consumes the next vector.\n\n Args:\n ark_file: The ARK data file.\n\n Returns:\n The next vector as a 2D Numpy array.\n \"\"\"\n idx = None\n vector = []\n\n for line in ark_file:\n line = line.strip()\n fields = line.split()\n\n if not idx:\n idx = fields[0]\n fields.pop(0)\n fields.pop(0)\n\n end = fields and fields[-1] == \"]\"\n\n if end:\n fields.pop()\n\n if fields:\n vector.append(fields)\n\n if end:\n break\n\n return idx, np.asarray(vector, dtype=np.float32)\n\ndef consume_next_text(text_file):\n \"\"\"Consumes the next text line from `text_file`.\"\"\"\n idx = None\n text = text_file.readline()\n\n if text:\n tokens = text.strip().split()\n idx = tokens[0]\n tokens.pop(0)\n text = \" \".join(tokens)\n\n return idx, text\n\ndef write_text(text, writer):\n \"\"\"Serializes a line of text.\"\"\"\n writer.write(text)\n writer.write(\"\\n\")\n\ndef ark_to_records_aligned(ark_filename, text_filename, out_prefix, compression_type=None):\n \"\"\"Converts ARK and text datasets to aligned TFRecords and text datasets.\"\"\"\n record_filename = \"%s.records\" % out_prefix\n if compression_type == \"GZIP\":\n record_filename = \"%s.gz\" % record_filename\n record_writer = tf.io.TFRecordWriter(record_filename, 
options=compression_type)\n text_writer = open(out_prefix + \".txt\", encoding=\"utf-8\", mode=\"w\")\n\n ark_buffer = {}\n text_buffer = {}\n count = 0\n\n def _write_example(vector, text):\n write_sequence_record(vector, record_writer)\n write_text(text, text_writer)\n\n def _search_aligned():\n for idx in ark_buffer:\n if idx in text_buffer:\n vector = ark_buffer[idx]\n text = text_buffer[idx]\n\n del ark_buffer[idx]\n del text_buffer[idx]\n\n return vector, text\n\n return None, None\n\n with open(ark_filename, encoding=\"utf-8\") as ark_file, open(text_filename, encoding=\"utf-8\") as text_file: #pylint: disable=line-too-long\n while True:\n ark_idx, vector = consume_next_vector(ark_file)\n text_idx, text = consume_next_text(text_file)\n\n if not ark_idx and not text_idx:\n # Both files are empty.\n break\n\n if ark_idx == text_idx:\n # If the indices match, write the example.\n _write_example(vector, text)\n count += 1\n else:\n # Otherwise store the entries.\n if ark_idx:\n ark_buffer[ark_idx] = vector\n if text_idx:\n text_buffer[text_idx] = text\n\n # Look if we can now find aligned entries.\n vector, text = _search_aligned()\n\n if vector is not None:\n _write_example(vector, text)\n count += 1\n\n # Search alignments in stored entries.\n while True:\n vector, text = _search_aligned()\n if vector is None:\n break\n _write_example(vector, text)\n count += 1\n\n record_writer.close()\n text_writer.close()\n\n print(\"Saved {} aligned records.\".format(count))\n\ndef ark_to_records(ark_filename, out_prefix, compression_type=None):\n \"\"\"Converts ARK dataset to TFRecords.\"\"\"\n record_writer = tf.io.TFRecordWriter(out_prefix + \".records\", options=compression_type)\n count = 0\n\n with open(ark_filename, encoding=\"utf-8\") as ark_file:\n while True:\n ark_idx, vector = consume_next_vector(ark_file)\n if not ark_idx:\n break\n write_sequence_record(vector, record_writer)\n count += 1\n\n record_writer.close()\n print(\"Saved {} records.\".format(count))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ark\", required=True,\n help=\"Indexed ARK data file.\")\n parser.add_argument(\"--txt\",\n help=(\"Indexed target text data file \"\n \"(must set it to align source and target files).\"))\n parser.add_argument(\"--out\", required=True,\n help=\"Output files prefix (will be suffixed by .records and .txt).\")\n parser.add_argument(\"--compression_type\", default=None, choices=[\"GZIP\"],\n help=\"Optional compression type.\")\n args = parser.parse_args()\n\n if args.txt:\n ark_to_records_aligned(args.ark, args.txt, args.out, compression_type=args.compression_type)\n else:\n ark_to_records(args.ark, args.out, compression_type=args.compression_type)\n\nif __name__ == \"__main__\":\n main()\n", "path": "opennmt/bin/ark_to_records.py"}]} | 1,972 | 119 |
gh_patches_debug_2966 | rasdani/github-patches | git_diff | ivy-llc__ivy-16518 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
uniform
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/random.py`
Content:
```
1 # global
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/random.py b/ivy/functional/frontends/paddle/tensor/random.py
--- a/ivy/functional/frontends/paddle/tensor/random.py
+++ b/ivy/functional/frontends/paddle/tensor/random.py
@@ -1 +1,15 @@
# global
+import ivy
+from ivy.func_wrapper import with_supported_dtypes
+from ivy.functional.frontends.paddle.func_wrapper import (
+ to_ivy_arrays_and_back,
+)
+
+
+@with_supported_dtypes(
+ {"2.4.2 and below": ("float32", "float64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
+ return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/random.py b/ivy/functional/frontends/paddle/tensor/random.py\n--- a/ivy/functional/frontends/paddle/tensor/random.py\n+++ b/ivy/functional/frontends/paddle/tensor/random.py\n@@ -1 +1,15 @@\n # global\n+import ivy\n+from ivy.func_wrapper import with_supported_dtypes\n+from ivy.functional.frontends.paddle.func_wrapper import (\n+ to_ivy_arrays_and_back,\n+)\n+\n+\n+@with_supported_dtypes(\n+ {\"2.4.2 and below\": (\"float32\", \"float64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):\n+ return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)\n", "issue": "uniform\n\n", "before_files": [{"content": "# global\n", "path": "ivy/functional/frontends/paddle/tensor/random.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):\n return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)\n", "path": "ivy/functional/frontends/paddle/tensor/random.py"}]} | 272 | 212 |
gh_patches_debug_21213 | rasdani/github-patches | git_diff | crytic__slither-2310 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug-Candidate]: --disable-color ignored, printer produces colored outputs
### Describe the issue:
Flag `--disable-color` seems to be ignored and the printer produces colored output with ASCII escape characters not suitable for capture into plaintext files
```
slither --help
usage: slither target [flag]
Additional options:
...
--disable-color Disable output colorization
```
Workaround: pass the output through the following sed script:
```
slither . --print function-summary 2>&1 | sed 's/\x1b\[[0-9;]*m//g'
```
### Code example to reproduce the issue:
<img width="1192" alt="image" src="https://github.com/crytic/slither/assets/7992612/850e41d6-e60e-4383-bdb4-c6d6a385c320">
### Version:
slither --version
0.10.0
From docker image `ghcr.io/trailofbits/eth-security-toolbox:nightly`
### Relevant log output:
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/utils/myprettytable.py`
Content:
```
1 from typing import List, Dict, Union
2
3 from prettytable.colortable import ColorTable, Themes
4
5
6 class MyPrettyTable:
7 def __init__(self, field_names: List[str], pretty_align: bool = True): # TODO: True by default?
8 self._field_names = field_names
9 self._rows: List = []
10 self._options: Dict = {}
11 if pretty_align:
12 self._options["set_alignment"] = []
13 self._options["set_alignment"] += [(field_names[0], "l")]
14 for field_name in field_names[1:]:
15 self._options["set_alignment"] += [(field_name, "r")]
16 else:
17 self._options["set_alignment"] = []
18
19 def add_row(self, row: List[Union[str, List[str]]]) -> None:
20 self._rows.append(row)
21
22 def to_pretty_table(self) -> ColorTable:
23 table = ColorTable(self._field_names, theme=Themes.OCEAN)
24 for row in self._rows:
25 table.add_row(row)
26 if len(self._options["set_alignment"]):
27 for column_header, value in self._options["set_alignment"]:
28 table.align[column_header] = value
29 return table
30
31 def to_json(self) -> Dict:
32 return {"fields_names": self._field_names, "rows": self._rows}
33
34 def __str__(self) -> str:
35 return str(self.to_pretty_table())
36
37
38 # UTILITY FUNCTIONS
39
40
41 def make_pretty_table(
42 headers: list, body: dict, totals: bool = False, total_header="TOTAL"
43 ) -> MyPrettyTable:
44 """
45 Converts a dict to a MyPrettyTable. Dict keys are the row headers.
46 Args:
47 headers: str[] of column names
48 body: dict of row headers with a dict of the values
49 totals: bool optional add Totals row
50 total_header: str optional if totals is set to True this will override the default "TOTAL" header
51 Returns:
52 MyPrettyTable
53 """
54 table = MyPrettyTable(headers)
55 for row in body:
56 table_row = [row] + [body[row][key] for key in headers[1:]]
57 table.add_row(table_row)
58 if totals:
59 table.add_row(
60 [total_header] + [sum([body[row][key] for row in body]) for key in headers[1:]]
61 )
62 return table
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/utils/myprettytable.py b/slither/utils/myprettytable.py
--- a/slither/utils/myprettytable.py
+++ b/slither/utils/myprettytable.py
@@ -1,7 +1,10 @@
from typing import List, Dict, Union
+from prettytable import PrettyTable
from prettytable.colortable import ColorTable, Themes
+from slither.utils.colors import Colors
+
class MyPrettyTable:
def __init__(self, field_names: List[str], pretty_align: bool = True): # TODO: True by default?
@@ -19,8 +22,12 @@
def add_row(self, row: List[Union[str, List[str]]]) -> None:
self._rows.append(row)
- def to_pretty_table(self) -> ColorTable:
- table = ColorTable(self._field_names, theme=Themes.OCEAN)
+ def to_pretty_table(self) -> PrettyTable:
+ if Colors.COLORIZATION_ENABLED:
+ table = ColorTable(self._field_names, theme=Themes.OCEAN)
+ else:
+ table = PrettyTable(self._field_names)
+
for row in self._rows:
table.add_row(row)
if len(self._options["set_alignment"]):
| {"golden_diff": "diff --git a/slither/utils/myprettytable.py b/slither/utils/myprettytable.py\n--- a/slither/utils/myprettytable.py\n+++ b/slither/utils/myprettytable.py\n@@ -1,7 +1,10 @@\n from typing import List, Dict, Union\n \n+from prettytable import PrettyTable\n from prettytable.colortable import ColorTable, Themes\n \n+from slither.utils.colors import Colors\n+\n \n class MyPrettyTable:\n def __init__(self, field_names: List[str], pretty_align: bool = True): # TODO: True by default?\n@@ -19,8 +22,12 @@\n def add_row(self, row: List[Union[str, List[str]]]) -> None:\n self._rows.append(row)\n \n- def to_pretty_table(self) -> ColorTable:\n- table = ColorTable(self._field_names, theme=Themes.OCEAN)\n+ def to_pretty_table(self) -> PrettyTable:\n+ if Colors.COLORIZATION_ENABLED:\n+ table = ColorTable(self._field_names, theme=Themes.OCEAN)\n+ else:\n+ table = PrettyTable(self._field_names)\n+\n for row in self._rows:\n table.add_row(row)\n if len(self._options[\"set_alignment\"]):\n", "issue": "[Bug-Candidate]: --disable-color ignored, printer produces colored outputs\n### Describe the issue:\n\nFlag `--disable-color` seem to be ignored and printer produces colored output with ASCII escape characters not suitable to capture into plaintext files\r\n\r\n```\r\nslither --help \r\nusage: slither target [flag]\r\nAdditional options:\r\n...\r\n --disable-color Disable output colorization\r\n```\r\n\r\nWorkaround: pass the output through the following sed script:\r\n```\r\nslither . --print function-summary 2>&1 | sed 's/\\x1b\\[[0-9;]*m//g'\r\n```\n\n### Code example to reproduce the issue:\n\n<img width=\"1192\" alt=\"image\" src=\"https://github.com/crytic/slither/assets/7992612/850e41d6-e60e-4383-bdb4-c6d6a385c320\">\r\n\n\n### Version:\n\nslither --version\r\n0.10.0\r\n\r\nFrom docker image `ghcr.io/trailofbits/eth-security-toolbox:nightly`\n\n### Relevant log output:\n\n_No response_\n", "before_files": [{"content": "from typing import List, Dict, Union\n\nfrom prettytable.colortable import ColorTable, Themes\n\n\nclass MyPrettyTable:\n def __init__(self, field_names: List[str], pretty_align: bool = True): # TODO: True by default?\n self._field_names = field_names\n self._rows: List = []\n self._options: Dict = {}\n if pretty_align:\n self._options[\"set_alignment\"] = []\n self._options[\"set_alignment\"] += [(field_names[0], \"l\")]\n for field_name in field_names[1:]:\n self._options[\"set_alignment\"] += [(field_name, \"r\")]\n else:\n self._options[\"set_alignment\"] = []\n\n def add_row(self, row: List[Union[str, List[str]]]) -> None:\n self._rows.append(row)\n\n def to_pretty_table(self) -> ColorTable:\n table = ColorTable(self._field_names, theme=Themes.OCEAN)\n for row in self._rows:\n table.add_row(row)\n if len(self._options[\"set_alignment\"]):\n for column_header, value in self._options[\"set_alignment\"]:\n table.align[column_header] = value\n return table\n\n def to_json(self) -> Dict:\n return {\"fields_names\": self._field_names, \"rows\": self._rows}\n\n def __str__(self) -> str:\n return str(self.to_pretty_table())\n\n\n# UTILITY FUNCTIONS\n\n\ndef make_pretty_table(\n headers: list, body: dict, totals: bool = False, total_header=\"TOTAL\"\n) -> MyPrettyTable:\n \"\"\"\n Converts a dict to a MyPrettyTable. 
Dict keys are the row headers.\n Args:\n headers: str[] of column names\n body: dict of row headers with a dict of the values\n totals: bool optional add Totals row\n total_header: str optional if totals is set to True this will override the default \"TOTAL\" header\n Returns:\n MyPrettyTable\n \"\"\"\n table = MyPrettyTable(headers)\n for row in body:\n table_row = [row] + [body[row][key] for key in headers[1:]]\n table.add_row(table_row)\n if totals:\n table.add_row(\n [total_header] + [sum([body[row][key] for row in body]) for key in headers[1:]]\n )\n return table\n", "path": "slither/utils/myprettytable.py"}], "after_files": [{"content": "from typing import List, Dict, Union\n\nfrom prettytable import PrettyTable\nfrom prettytable.colortable import ColorTable, Themes\n\nfrom slither.utils.colors import Colors\n\n\nclass MyPrettyTable:\n def __init__(self, field_names: List[str], pretty_align: bool = True): # TODO: True by default?\n self._field_names = field_names\n self._rows: List = []\n self._options: Dict = {}\n if pretty_align:\n self._options[\"set_alignment\"] = []\n self._options[\"set_alignment\"] += [(field_names[0], \"l\")]\n for field_name in field_names[1:]:\n self._options[\"set_alignment\"] += [(field_name, \"r\")]\n else:\n self._options[\"set_alignment\"] = []\n\n def add_row(self, row: List[Union[str, List[str]]]) -> None:\n self._rows.append(row)\n\n def to_pretty_table(self) -> PrettyTable:\n if Colors.COLORIZATION_ENABLED:\n table = ColorTable(self._field_names, theme=Themes.OCEAN)\n else:\n table = PrettyTable(self._field_names)\n\n for row in self._rows:\n table.add_row(row)\n if len(self._options[\"set_alignment\"]):\n for column_header, value in self._options[\"set_alignment\"]:\n table.align[column_header] = value\n return table\n\n def to_json(self) -> Dict:\n return {\"fields_names\": self._field_names, \"rows\": self._rows}\n\n def __str__(self) -> str:\n return str(self.to_pretty_table())\n\n\n# UTILITY FUNCTIONS\n\n\ndef make_pretty_table(\n headers: list, body: dict, totals: bool = False, total_header=\"TOTAL\"\n) -> MyPrettyTable:\n \"\"\"\n Converts a dict to a MyPrettyTable. Dict keys are the row headers.\n Args:\n headers: str[] of column names\n body: dict of row headers with a dict of the values\n totals: bool optional add Totals row\n total_header: str optional if totals is set to True this will override the default \"TOTAL\" header\n Returns:\n MyPrettyTable\n \"\"\"\n table = MyPrettyTable(headers)\n for row in body:\n table_row = [row] + [body[row][key] for key in headers[1:]]\n table.add_row(table_row)\n if totals:\n table.add_row(\n [total_header] + [sum([body[row][key] for row in body]) for key in headers[1:]]\n )\n return table\n", "path": "slither/utils/myprettytable.py"}]} | 1,148 | 280 |
gh_patches_debug_13123 | rasdani/github-patches | git_diff | ietf-tools__datatracker-3727 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The non-wg list view contains things that do not belong.
The list contains things that do not belong. For example, 'geopriv' is listed as a non-wg list, but it is a concluded wg. Maybe this should be a separate issue.
_Originally posted by @russhousley in https://github.com/ietf-tools/datatracker/issues/3675#issuecomment-1075013354_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/mailinglists/views.py`
Content:
```
1 # Copyright The IETF Trust 2007, All Rights Reserved
2
3 import re
4
5 from django.shortcuts import render
6
7 import debug # pyflakes:ignore
8
9 from ietf.group.models import Group
10 from ietf.mailinglists.models import List
11
12 def groups(request):
13 groups = Group.objects.filter(type__features__acts_like_wg=True, list_archive__startswith='http').exclude(state__in=('bof', 'conclude')).order_by("acronym")
14
15 return render(request, "mailinglists/group_archives.html", { "groups": groups } )
16
17 def nonwg(request):
18 groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof', 'conclude']).order_by("acronym")
19
20 #urls = [ g.list_archive for g in groups if '.ietf.org' in g.list_archive ]
21
22 wg_lists = set()
23 for g in groups:
24 wg_lists.add(g.acronym)
25 match = re.search(r'^(https?://mailarchive.ietf.org/arch/(browse/|search/\?email-list=))(?P<name>[^/]*)/?$', g.list_archive)
26 if match:
27 wg_lists.add(match.group('name').lower())
28
29 lists = List.objects.filter(advertised=True)
30 #debug.show('lists.count()')
31 lists = lists.exclude(name__in=wg_lists).order_by('name')
32 #debug.show('lists.count()')
33 return render(request, "mailinglists/nonwg.html", { "lists": lists } )
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ietf/mailinglists/views.py b/ietf/mailinglists/views.py
--- a/ietf/mailinglists/views.py
+++ b/ietf/mailinglists/views.py
@@ -1,4 +1,4 @@
-# Copyright The IETF Trust 2007, All Rights Reserved
+# Copyright The IETF Trust 2007-2022, All Rights Reserved
import re
@@ -15,7 +15,7 @@
return render(request, "mailinglists/group_archives.html", { "groups": groups } )
def nonwg(request):
- groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof', 'conclude']).order_by("acronym")
+ groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof']).order_by("acronym")
#urls = [ g.list_archive for g in groups if '.ietf.org' in g.list_archive ]
| {"golden_diff": "diff --git a/ietf/mailinglists/views.py b/ietf/mailinglists/views.py\n--- a/ietf/mailinglists/views.py\n+++ b/ietf/mailinglists/views.py\n@@ -1,4 +1,4 @@\n-# Copyright The IETF Trust 2007, All Rights Reserved\n+# Copyright The IETF Trust 2007-2022, All Rights Reserved\n \n import re\n \n@@ -15,7 +15,7 @@\n return render(request, \"mailinglists/group_archives.html\", { \"groups\": groups } )\n \n def nonwg(request):\n- groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof', 'conclude']).order_by(\"acronym\")\n+ groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof']).order_by(\"acronym\")\n \n #urls = [ g.list_archive for g in groups if '.ietf.org' in g.list_archive ]\n", "issue": "The non-wg list view contains things that do not belong.\nThe list contains things that do not belong. For example, 'geopriv' is listed as a non-wg list, but it is a concluded wg. Maybe this should be a separate issue.\r\n\r\n_Originally posted by @russhousley in https://github.com/ietf-tools/datatracker/issues/3675#issuecomment-1075013354_\n", "before_files": [{"content": "# Copyright The IETF Trust 2007, All Rights Reserved\n\nimport re\n\nfrom django.shortcuts import render\n\nimport debug # pyflakes:ignore\n\nfrom ietf.group.models import Group\nfrom ietf.mailinglists.models import List\n\ndef groups(request):\n groups = Group.objects.filter(type__features__acts_like_wg=True, list_archive__startswith='http').exclude(state__in=('bof', 'conclude')).order_by(\"acronym\")\n\n return render(request, \"mailinglists/group_archives.html\", { \"groups\": groups } )\n\ndef nonwg(request):\n groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof', 'conclude']).order_by(\"acronym\")\n\n #urls = [ g.list_archive for g in groups if '.ietf.org' in g.list_archive ]\n\n wg_lists = set()\n for g in groups:\n wg_lists.add(g.acronym)\n match = re.search(r'^(https?://mailarchive.ietf.org/arch/(browse/|search/\\?email-list=))(?P<name>[^/]*)/?$', g.list_archive)\n if match:\n wg_lists.add(match.group('name').lower())\n\n lists = List.objects.filter(advertised=True)\n #debug.show('lists.count()')\n lists = lists.exclude(name__in=wg_lists).order_by('name')\n #debug.show('lists.count()')\n return render(request, \"mailinglists/nonwg.html\", { \"lists\": lists } )\n", "path": "ietf/mailinglists/views.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2007-2022, All Rights Reserved\n\nimport re\n\nfrom django.shortcuts import render\n\nimport debug # pyflakes:ignore\n\nfrom ietf.group.models import Group\nfrom ietf.mailinglists.models import List\n\ndef groups(request):\n groups = Group.objects.filter(type__features__acts_like_wg=True, list_archive__startswith='http').exclude(state__in=('bof', 'conclude')).order_by(\"acronym\")\n\n return render(request, \"mailinglists/group_archives.html\", { \"groups\": groups } )\n\ndef nonwg(request):\n groups = Group.objects.filter(type__features__acts_like_wg=True).exclude(state__in=['bof']).order_by(\"acronym\")\n\n #urls = [ g.list_archive for g in groups if '.ietf.org' in g.list_archive ]\n\n wg_lists = set()\n for g in groups:\n wg_lists.add(g.acronym)\n match = re.search(r'^(https?://mailarchive.ietf.org/arch/(browse/|search/\\?email-list=))(?P<name>[^/]*)/?$', g.list_archive)\n if match:\n wg_lists.add(match.group('name').lower())\n\n lists = List.objects.filter(advertised=True)\n #debug.show('lists.count()')\n lists = 
lists.exclude(name__in=wg_lists).order_by('name')\n #debug.show('lists.count()')\n return render(request, \"mailinglists/nonwg.html\", { \"lists\": lists } )\n", "path": "ietf/mailinglists/views.py"}]} | 751 | 219 |
gh_patches_debug_1637 | rasdani/github-patches | git_diff | pre-commit__pre-commit-67 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError while instantiating LoggingHandler (2.6)
I assume this is new-style vs old-style classes being grumpy?
```
>>> from pre_commit.logging_handler import LoggingHandler
>>> LoggingHandler(True)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../py_env/lib/python2.6/site-packages/pre_commit/logging_handler.py", line 19, in __init__
super(LoggingHandler, self).__init__()
TypeError: super() argument 1 must be type, not classobj
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/logging_handler.py`
Content:
```
1
2 from __future__ import print_function
3
4 import logging
5
6 from pre_commit import color
7
8
9 LOG_LEVEL_COLORS = {
10 'DEBUG': '',
11 'INFO': '',
12 'WARNING': color.YELLOW,
13 'ERROR': color.RED,
14 }
15
16
17 class LoggingHandler(logging.Handler):
18 def __init__(self, use_color):
19 super(LoggingHandler, self).__init__()
20 self.use_color = use_color
21
22 def emit(self, record):
23 print(
24 u'{0}{1}'.format(
25 color.format_color(
26 '[{0}]'.format(record.levelname),
27 LOG_LEVEL_COLORS[record.levelname],
28 self.use_color,
29 ) + ' ' if record.levelno >= logging.WARNING else '',
30 record.getMessage(),
31 )
32 )
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/logging_handler.py b/pre_commit/logging_handler.py
--- a/pre_commit/logging_handler.py
+++ b/pre_commit/logging_handler.py
@@ -16,7 +16,7 @@
class LoggingHandler(logging.Handler):
def __init__(self, use_color):
- super(LoggingHandler, self).__init__()
+ logging.Handler.__init__(self)
self.use_color = use_color
def emit(self, record):
| {"golden_diff": "diff --git a/pre_commit/logging_handler.py b/pre_commit/logging_handler.py\n--- a/pre_commit/logging_handler.py\n+++ b/pre_commit/logging_handler.py\n@@ -16,7 +16,7 @@\n \n class LoggingHandler(logging.Handler):\n def __init__(self, use_color):\n- super(LoggingHandler, self).__init__()\n+ logging.Handler.__init__(self)\n self.use_color = use_color\n \n def emit(self, record):\n", "issue": "TypeError while instantiating LoggingHandler (2.6)\nI assume this is new-style vs old-style classes being grumpy?\n\n```\n>>> from pre_commit.logging_handler import LoggingHandler\n>>> LoggingHandler(True)\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \".../py_env/lib/python2.6/site-packages/pre_commit/logging_handler.py\", line 19, in __init__\n super(LoggingHandler, self).__init__()\nTypeError: super() argument 1 must be type, not classobj\n```\n\n", "before_files": [{"content": "\nfrom __future__ import print_function\n\nimport logging\n\nfrom pre_commit import color\n\n\nLOG_LEVEL_COLORS = {\n 'DEBUG': '',\n 'INFO': '',\n 'WARNING': color.YELLOW,\n 'ERROR': color.RED,\n}\n\n\nclass LoggingHandler(logging.Handler):\n def __init__(self, use_color):\n super(LoggingHandler, self).__init__()\n self.use_color = use_color\n\n def emit(self, record):\n print(\n u'{0}{1}'.format(\n color.format_color(\n '[{0}]'.format(record.levelname),\n LOG_LEVEL_COLORS[record.levelname],\n self.use_color,\n ) + ' ' if record.levelno >= logging.WARNING else '',\n record.getMessage(),\n )\n )\n", "path": "pre_commit/logging_handler.py"}], "after_files": [{"content": "\nfrom __future__ import print_function\n\nimport logging\n\nfrom pre_commit import color\n\n\nLOG_LEVEL_COLORS = {\n 'DEBUG': '',\n 'INFO': '',\n 'WARNING': color.YELLOW,\n 'ERROR': color.RED,\n}\n\n\nclass LoggingHandler(logging.Handler):\n def __init__(self, use_color):\n logging.Handler.__init__(self)\n self.use_color = use_color\n\n def emit(self, record):\n print(\n u'{0}{1}'.format(\n color.format_color(\n '[{0}]'.format(record.levelname),\n LOG_LEVEL_COLORS[record.levelname],\n self.use_color,\n ) + ' ' if record.levelno >= logging.WARNING else '',\n record.getMessage(),\n )\n )\n", "path": "pre_commit/logging_handler.py"}]} | 594 | 96 |
gh_patches_debug_22883 | rasdani/github-patches | git_diff | getsentry__sentry-3447 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Turn the option system.logging-format into an enum.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/logging/__init__.py`
Content:
```
1 """
2 sentry.logging
3 ~~~~~~~~~~~~~~
4 :copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.
5 :license: BSD, see LICENSE for more details.
6 """
7
8 from __future__ import absolute_import
9
```
Path: `src/sentry/options/defaults.py`
Content:
```
1 """
2 sentry.options.defaults
3 ~~~~~~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
6 :license: BSD, see LICENSE for more details.
7 """
8 from __future__ import absolute_import, print_function
9
10 from sentry.options import (
11 FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,
12 register,
13 )
14 from sentry.utils.types import Dict, String
15
16 # Cache
17 # register('cache.backend', flags=FLAG_NOSTORE)
18 # register('cache.options', type=Dict, flags=FLAG_NOSTORE)
19
20 # System
21 register('system.admin-email', flags=FLAG_REQUIRED)
22 register('system.databases', type=Dict, flags=FLAG_NOSTORE)
23 # register('system.debug', default=False, flags=FLAG_NOSTORE)
24 register('system.rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
25 register('system.secret-key', flags=FLAG_NOSTORE)
26 # Absolute URL to the sentry root directory. Should not include a trailing slash.
27 register('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
28 register('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)
29 register('system.logging-format', default='human', flags=FLAG_PRIORITIZE_DISK)
30
31 # Redis
32 register(
33 'redis.clusters',
34 type=Dict,
35 default={
36 'default': {
37 'hosts': {
38 0: {
39 'host': '127.0.0.1',
40 'port': 6379,
41 }
42 },
43 },
44 },
45 flags=FLAG_NOSTORE | FLAG_IMMUTABLE
46 )
47 register('redis.options', type=Dict, flags=FLAG_NOSTORE)
48
49 # symbolizer specifics
50 register('dsym.llvm-symbolizer-path', type=String)
51 register('dsym.cache-path', type=String, default='/tmp/sentry-dsym-cache')
52
53 # Mail
54 register('mail.backend', default='smtp', flags=FLAG_NOSTORE)
55 register('mail.host', default='localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
56 register('mail.port', default=25, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
57 register('mail.username', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
58 register('mail.password', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
59 register('mail.use-tls', default=False, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
60 register('mail.subject-prefix', default='[Sentry] ', flags=FLAG_PRIORITIZE_DISK)
61 register('mail.from', default='root@localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
62 register('mail.list-namespace', type=String, default='localhost', flags=FLAG_NOSTORE)
63 register('mail.enable-replies', default=False, flags=FLAG_PRIORITIZE_DISK)
64 register('mail.reply-hostname', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
65 register('mail.mailgun-api-key', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
66
67 # SMS
68 register('sms.twilio-account', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
69 register('sms.twilio-token', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
70 register('sms.twilio-number', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/logging/__init__.py b/src/sentry/logging/__init__.py
--- a/src/sentry/logging/__init__.py
+++ b/src/sentry/logging/__init__.py
@@ -6,3 +6,8 @@
"""
from __future__ import absolute_import
+
+
+class LoggingFormat(object):
+ HUMAN = 'human'
+ MACHINE = 'machine'
diff --git a/src/sentry/options/defaults.py b/src/sentry/options/defaults.py
--- a/src/sentry/options/defaults.py
+++ b/src/sentry/options/defaults.py
@@ -7,6 +7,7 @@
"""
from __future__ import absolute_import, print_function
+from sentry.logging import LoggingFormat
from sentry.options import (
FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,
register,
@@ -26,7 +27,7 @@
# Absolute URL to the sentry root directory. Should not include a trailing slash.
register('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)
-register('system.logging-format', default='human', flags=FLAG_PRIORITIZE_DISK)
+register('system.logging-format', default=LoggingFormat.HUMAN, flags=FLAG_PRIORITIZE_DISK)
# Redis
register(
| {"golden_diff": "diff --git a/src/sentry/logging/__init__.py b/src/sentry/logging/__init__.py\n--- a/src/sentry/logging/__init__.py\n+++ b/src/sentry/logging/__init__.py\n@@ -6,3 +6,8 @@\n \"\"\"\n \n from __future__ import absolute_import\n+\n+\n+class LoggingFormat(object):\n+ HUMAN = 'human'\n+ MACHINE = 'machine'\ndiff --git a/src/sentry/options/defaults.py b/src/sentry/options/defaults.py\n--- a/src/sentry/options/defaults.py\n+++ b/src/sentry/options/defaults.py\n@@ -7,6 +7,7 @@\n \"\"\"\n from __future__ import absolute_import, print_function\n \n+from sentry.logging import LoggingFormat\n from sentry.options import (\n FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,\n register,\n@@ -26,7 +27,7 @@\n # Absolute URL to the sentry root directory. Should not include a trailing slash.\n register('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\n register('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)\n-register('system.logging-format', default='human', flags=FLAG_PRIORITIZE_DISK)\n+register('system.logging-format', default=LoggingFormat.HUMAN, flags=FLAG_PRIORITIZE_DISK)\n \n # Redis\n register(\n", "issue": "Turn the option system.logging-format into an enum.\n\n", "before_files": [{"content": "\"\"\"\nsentry.logging\n~~~~~~~~~~~~~~\n:copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom __future__ import absolute_import\n", "path": "src/sentry/logging/__init__.py"}, {"content": "\"\"\"\nsentry.options.defaults\n~~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nfrom sentry.options import (\n FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,\n register,\n)\nfrom sentry.utils.types import Dict, String\n\n# Cache\n# register('cache.backend', flags=FLAG_NOSTORE)\n# register('cache.options', type=Dict, flags=FLAG_NOSTORE)\n\n# System\nregister('system.admin-email', flags=FLAG_REQUIRED)\nregister('system.databases', type=Dict, flags=FLAG_NOSTORE)\n# register('system.debug', default=False, flags=FLAG_NOSTORE)\nregister('system.rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('system.secret-key', flags=FLAG_NOSTORE)\n# Absolute URL to the sentry root directory. 
Should not include a trailing slash.\nregister('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)\nregister('system.logging-format', default='human', flags=FLAG_PRIORITIZE_DISK)\n\n# Redis\nregister(\n 'redis.clusters',\n type=Dict,\n default={\n 'default': {\n 'hosts': {\n 0: {\n 'host': '127.0.0.1',\n 'port': 6379,\n }\n },\n },\n },\n flags=FLAG_NOSTORE | FLAG_IMMUTABLE\n)\nregister('redis.options', type=Dict, flags=FLAG_NOSTORE)\n\n# symbolizer specifics\nregister('dsym.llvm-symbolizer-path', type=String)\nregister('dsym.cache-path', type=String, default='/tmp/sentry-dsym-cache')\n\n# Mail\nregister('mail.backend', default='smtp', flags=FLAG_NOSTORE)\nregister('mail.host', default='localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.port', default=25, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.username', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('mail.password', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('mail.use-tls', default=False, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.subject-prefix', default='[Sentry] ', flags=FLAG_PRIORITIZE_DISK)\nregister('mail.from', default='root@localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.list-namespace', type=String, default='localhost', flags=FLAG_NOSTORE)\nregister('mail.enable-replies', default=False, flags=FLAG_PRIORITIZE_DISK)\nregister('mail.reply-hostname', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('mail.mailgun-api-key', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\n\n# SMS\nregister('sms.twilio-account', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('sms.twilio-token', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('sms.twilio-number', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\n", "path": "src/sentry/options/defaults.py"}], "after_files": [{"content": "\"\"\"\nsentry.logging\n~~~~~~~~~~~~~~\n:copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom __future__ import absolute_import\n\n\nclass LoggingFormat(object):\n HUMAN = 'human'\n MACHINE = 'machine'\n", "path": "src/sentry/logging/__init__.py"}, {"content": "\"\"\"\nsentry.options.defaults\n~~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nfrom sentry.logging import LoggingFormat\nfrom sentry.options import (\n FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,\n register,\n)\nfrom sentry.utils.types import Dict, String\n\n# Cache\n# register('cache.backend', flags=FLAG_NOSTORE)\n# register('cache.options', type=Dict, flags=FLAG_NOSTORE)\n\n# System\nregister('system.admin-email', flags=FLAG_REQUIRED)\nregister('system.databases', type=Dict, flags=FLAG_NOSTORE)\n# register('system.debug', default=False, flags=FLAG_NOSTORE)\nregister('system.rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('system.secret-key', flags=FLAG_NOSTORE)\n# Absolute URL to the sentry root directory. 
Should not include a trailing slash.\nregister('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)\nregister('system.logging-format', default=LoggingFormat.HUMAN, flags=FLAG_PRIORITIZE_DISK)\n\n# Redis\nregister(\n 'redis.clusters',\n type=Dict,\n default={\n 'default': {\n 'hosts': {\n 0: {\n 'host': '127.0.0.1',\n 'port': 6379,\n }\n },\n },\n },\n flags=FLAG_NOSTORE | FLAG_IMMUTABLE\n)\nregister('redis.options', type=Dict, flags=FLAG_NOSTORE)\n\n# symbolizer specifics\nregister('dsym.llvm-symbolizer-path', type=String)\nregister('dsym.cache-path', type=String, default='/tmp/sentry-dsym-cache')\n\n# Mail\nregister('mail.backend', default='smtp', flags=FLAG_NOSTORE)\nregister('mail.host', default='localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.port', default=25, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.username', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('mail.password', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('mail.use-tls', default=False, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.subject-prefix', default='[Sentry] ', flags=FLAG_PRIORITIZE_DISK)\nregister('mail.from', default='root@localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)\nregister('mail.list-namespace', type=String, default='localhost', flags=FLAG_NOSTORE)\nregister('mail.enable-replies', default=False, flags=FLAG_PRIORITIZE_DISK)\nregister('mail.reply-hostname', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('mail.mailgun-api-key', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\n\n# SMS\nregister('sms.twilio-account', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('sms.twilio-token', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\nregister('sms.twilio-number', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)\n", "path": "src/sentry/options/defaults.py"}]} | 1,245 | 310 |
gh_patches_debug_46 | rasdani/github-patches | git_diff | archlinux__archinstall-1300 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Archinstall discover shop non-functional.
Hello,
I have installed Arch with archinstall twice now and selected the desktop option, then KDE, but I noticed that by default the "Discover" shop does not want to function; I have to download the packagekit-qt5 package, and then it functions. Just wanted to let you know.
Archinstall discover shop non-functional.
Hello,
I have installed Arch with archinstall twice now and selected the desktop option, then KDE, but I noticed that by default the "Discover" shop does not want to function; I have to download the packagekit-qt5 package, and then it functions. Just wanted to let you know.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `profiles/kde.py`
Content:
```
1 # A desktop environment using "KDE".
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 __packages__ = [
8 "plasma-meta",
9 "konsole",
10 "kwrite",
11 "dolphin",
12 "ark",
13 "sddm",
14 "plasma-wayland-session",
15 "egl-wayland",
16 ]
17
18
19 # TODO: Remove hard dependency of bash (due to .bash_profile)
20
21
22 def _prep_function(*args, **kwargs):
23 """
24 Magic function called by the importing installer
25 before continuing any further. It also avoids executing any
26 other code in this stage. So it's a safe way to ask the user
27 for more input before any other installer steps start.
28 """
29
30 # KDE requires a functioning Xorg installation.
31 profile = archinstall.Profile(None, 'xorg')
32 with profile.load_instructions(namespace='xorg.py') as imported:
33 if hasattr(imported, '_prep_function'):
34 return imported._prep_function()
35 else:
36 print('Deprecated (??): xorg profile has no _prep_function() anymore')
37
38
39 """
40 def _post_install(*args, **kwargs):
41 if "nvidia" in _gfx_driver_packages:
42 print("Plasma Wayland has known compatibility issues with the proprietary Nvidia driver")
43 print("After booting, you can choose between Wayland and Xorg using the drop-down menu")
44 return True
45 """
46
47 # Ensures that this code only gets executed if executed
48 # through importlib.util.spec_from_file_location("kde", "/somewhere/kde.py")
49 # or through conventional import kde
50 if __name__ == 'kde':
51 # Install dependency profiles
52 archinstall.storage['installation_session'].install_profile('xorg')
53
54 # Install the KDE packages
55 archinstall.storage['installation_session'].add_additional_packages(__packages__)
56
57 # Enable autostart of KDE for all users
58 archinstall.storage['installation_session'].enable_service('sddm')
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/profiles/kde.py b/profiles/kde.py
--- a/profiles/kde.py
+++ b/profiles/kde.py
@@ -13,6 +13,7 @@
"sddm",
"plasma-wayland-session",
"egl-wayland",
+ "packagekit-qt5",
]
| {"golden_diff": "diff --git a/profiles/kde.py b/profiles/kde.py\n--- a/profiles/kde.py\n+++ b/profiles/kde.py\n@@ -13,6 +13,7 @@\n \t\"sddm\",\n \t\"plasma-wayland-session\",\n \t\"egl-wayland\",\n+\t\"packagekit-qt5\",\n ]\n", "issue": "Archinstall discover shop non-functional.\nHello,\r\n\r\nI have installed Arch with archinstall twice now, selected the desktop option then KDE but I noticed that by default the \"Discover\" shop does not want to function I have to download the packagekit-qt5 package then it functions. Just wanted to let you know.\r\n\r\n\nArchinstall discover shop non-functional.\nHello,\r\n\r\nI have installed Arch with archinstall twice now, selected the desktop option then KDE but I noticed that by default the \"Discover\" shop does not want to function I have to download the packagekit-qt5 package then it functions. Just wanted to let you know.\r\n\r\n\n", "before_files": [{"content": "# A desktop environment using \"KDE\".\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"plasma-meta\",\n\t\"konsole\",\n\t\"kwrite\",\n\t\"dolphin\",\n\t\"ark\",\n\t\"sddm\",\n\t\"plasma-wayland-session\",\n\t\"egl-wayland\",\n]\n\n\n# TODO: Remove hard dependency of bash (due to .bash_profile)\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# KDE requires a functioning Xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n\"\"\"\ndef _post_install(*args, **kwargs):\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tprint(\"Plasma Wayland has known compatibility issues with the proprietary Nvidia driver\")\n\tprint(\"After booting, you can choose between Wayland and Xorg using the drop-down menu\")\n\treturn True\n\"\"\"\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"kde\", \"/somewhere/kde.py\")\n# or through conventional import kde\nif __name__ == 'kde':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the KDE packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\t# Enable autostart of KDE for all users\n\tarchinstall.storage['installation_session'].enable_service('sddm')\n", "path": "profiles/kde.py"}], "after_files": [{"content": "# A desktop environment using \"KDE\".\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"plasma-meta\",\n\t\"konsole\",\n\t\"kwrite\",\n\t\"dolphin\",\n\t\"ark\",\n\t\"sddm\",\n\t\"plasma-wayland-session\",\n\t\"egl-wayland\",\n\t\"packagekit-qt5\",\n]\n\n\n# TODO: Remove hard dependency of bash (due to .bash_profile)\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# KDE requires a functioning Xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n\"\"\"\ndef _post_install(*args, **kwargs):\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tprint(\"Plasma Wayland has known compatibility issues with the proprietary Nvidia driver\")\n\tprint(\"After booting, you can choose between Wayland and Xorg using the drop-down menu\")\n\treturn True\n\"\"\"\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"kde\", \"/somewhere/kde.py\")\n# or through conventional import kde\nif __name__ == 'kde':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the KDE packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\t# Enable autostart of KDE for all users\n\tarchinstall.storage['installation_session'].enable_service('sddm')\n", "path": "profiles/kde.py"}]} | 935 | 76 |
gh_patches_debug_1815 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-959 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix for checkbox accessibility no longer working
### Current Behavior
Checkboxes in the Django admin superuser view are no longer generated with an associated label.
### Expected Behavior
Expect to see accessible checkboxes in django admin, no missing columns in either superuser or staff views.
### Steps to Reproduce
1. Log in as superuser
2. Go to list view on a model
3. Run ANDI or inspect checkboxes
### Environment
_No response_
### Additional Context
Traced this to the fix for missing columns in staff view. The check {% if results.0.form %} did not work and failed silently. Have a fix for this.
Will prioritize implementation and deployment to staging since we have some accessibility testing in progress.
### Issue Links
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/templatetags/custom_filters.py`
Content:
```
1 from django import template
2 import re
3
4 register = template.Library()
5
6
7 @register.filter(name="extract_value")
8 def extract_value(html_input):
9 match = re.search(r'value="([^"]*)"', html_input)
10 if match:
11 return match.group(1)
12 return ""
13
14
15 @register.filter
16 def extract_a_text(value):
17 # Use regex to extract the text within the <a> tag
18 pattern = r"<a\b[^>]*>(.*?)</a>"
19 match = re.search(pattern, value)
20 if match:
21 extracted_text = match.group(1)
22 else:
23 extracted_text = ""
24
25 return extracted_text
26
27
28 @register.filter
29 def find_index(haystack, needle):
30 try:
31 return haystack.index(needle)
32 except ValueError:
33 return -1
34
35
36 @register.filter
37 def slice_after(value, substring):
38 index = value.find(substring)
39 if index != -1:
40 result = value[index + len(substring) :]
41 return result
42 return value
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py
--- a/src/registrar/templatetags/custom_filters.py
+++ b/src/registrar/templatetags/custom_filters.py
@@ -40,3 +40,11 @@
result = value[index + len(substring) :]
return result
return value
+
+
[email protected]
+def contains_checkbox(html_list):
+ for html_string in html_list:
+ if re.search(r'<input[^>]*type="checkbox"', html_string):
+ return True
+ return False
| {"golden_diff": "diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py\n--- a/src/registrar/templatetags/custom_filters.py\n+++ b/src/registrar/templatetags/custom_filters.py\n@@ -40,3 +40,11 @@\n result = value[index + len(substring) :]\n return result\n return value\n+\n+\[email protected]\n+def contains_checkbox(html_list):\n+ for html_string in html_list:\n+ if re.search(r'<input[^>]*type=\"checkbox\"', html_string):\n+ return True\n+ return False\n", "issue": "Fix for checkbox accessibility no longer working\n### Current Behavior\n\nCheckboxes in django admin superuser no longer generated with an associated label.\n\n### Expected Behavior\n\nExpect to see accessible checkboxes in django admin, no missing columns in either superuser or staff views.\n\n### Steps to Reproduce\n\n1. Log in as superuser\r\n2. Go to list view on a model\r\n3. Run ANDI or inspect checkboxes\r\n\n\n### Environment\n\n_No response_\n\n### Additional Context\n\nTraced this to the fix for missing columns in staff view. The check {% if results.0.form %} did not work and failed silently. Have a fix for this.\r\n\r\nWill prioritize implementation and deployment to staging since we have some accessibility testing in progress.\n\n### Issue Links\n\n_No response_\n", "before_files": [{"content": "from django import template\nimport re\n\nregister = template.Library()\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n\n\[email protected]\ndef find_index(haystack, needle):\n try:\n return haystack.index(needle)\n except ValueError:\n return -1\n\n\[email protected]\ndef slice_after(value, substring):\n index = value.find(substring)\n if index != -1:\n result = value[index + len(substring) :]\n return result\n return value\n", "path": "src/registrar/templatetags/custom_filters.py"}], "after_files": [{"content": "from django import template\nimport re\n\nregister = template.Library()\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n\n\[email protected]\ndef find_index(haystack, needle):\n try:\n return haystack.index(needle)\n except ValueError:\n return -1\n\n\[email protected]\ndef slice_after(value, substring):\n index = value.find(substring)\n if index != -1:\n result = value[index + len(substring) :]\n return result\n return value\n\n\[email protected]\ndef contains_checkbox(html_list):\n for html_string in html_list:\n if re.search(r'<input[^>]*type=\"checkbox\"', html_string):\n return True\n return False\n", "path": "src/registrar/templatetags/custom_filters.py"}]} | 715 | 139 |
gh_patches_debug_2449 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-10168 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PubSub: declaratively drop Python 3.4 support
The README and the language classifiers in `setup.py` both only claim support for Python 3.5+ (and 2.7), but not Python 3.4. However, the `python_requires` in `setup.py` does not reflect that, and does not prevent installing the library in Python 3.4.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pubsub/setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-pubsub"
24 description = "Google Cloud Pub/Sub API client library"
25 version = "1.1.0"
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-api-core[grpc] >= 1.14.0, < 2.0.0dev",
33 "grpc-google-iam-v1 >= 0.12.3, < 0.13dev",
34 'enum34; python_version < "3.4"',
35 ]
36 extras = {}
37
38
39 # Setup boilerplate below this line.
40
41 package_root = os.path.abspath(os.path.dirname(__file__))
42
43 readme_filename = os.path.join(package_root, "README.rst")
44 with io.open(readme_filename, encoding="utf-8") as readme_file:
45 readme = readme_file.read()
46
47 # Only include packages under the 'google' namespace. Do not include tests,
48 # benchmarks, etc.
49 packages = [
50 package for package in setuptools.find_packages() if package.startswith("google")
51 ]
52
53 # Determine which namespaces are needed.
54 namespaces = ["google"]
55 if "google.cloud" in packages:
56 namespaces.append("google.cloud")
57
58
59 setuptools.setup(
60 name=name,
61 version=version,
62 description=description,
63 long_description=readme,
64 author="Google LLC",
65 author_email="[email protected]",
66 license="Apache 2.0",
67 url="https://github.com/GoogleCloudPlatform/google-cloud-python",
68 classifiers=[
69 release_status,
70 "Intended Audience :: Developers",
71 "License :: OSI Approved :: Apache Software License",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 2",
74 "Programming Language :: Python :: 2.7",
75 "Programming Language :: Python :: 3",
76 "Programming Language :: Python :: 3.5",
77 "Programming Language :: Python :: 3.6",
78 "Programming Language :: Python :: 3.7",
79 "Operating System :: OS Independent",
80 "Topic :: Internet",
81 ],
82 platforms="Posix; MacOS X; Windows",
83 packages=packages,
84 namespace_packages=namespaces,
85 install_requires=dependencies,
86 extras_require=extras,
87 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
88 include_package_data=True,
89 zip_safe=False,
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pubsub/setup.py b/pubsub/setup.py
--- a/pubsub/setup.py
+++ b/pubsub/setup.py
@@ -84,7 +84,7 @@
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
- python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
+ python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*",
include_package_data=True,
zip_safe=False,
)
| {"golden_diff": "diff --git a/pubsub/setup.py b/pubsub/setup.py\n--- a/pubsub/setup.py\n+++ b/pubsub/setup.py\n@@ -84,7 +84,7 @@\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n- python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n+ python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*\",\n include_package_data=True,\n zip_safe=False,\n )\n", "issue": "PubSub: declaratively drop Python 3.4 support\nThe README and the language classifiers in `setup.py` both only claim support for Python 3.5+ (and 2.7), but not Python 3.4. However, the `python_requires` in `setup.py` does not reflect that, and does not prevent installing the library in Python 3.4.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-pubsub\"\ndescription = \"Google Cloud Pub/Sub API client library\"\nversion = \"1.1.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.14.0, < 2.0.0dev\",\n \"grpc-google-iam-v1 >= 0.12.3, < 0.13dev\",\n 'enum34; python_version < \"3.4\"',\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "pubsub/setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-pubsub\"\ndescription = \"Google Cloud Pub/Sub API client library\"\nversion = \"1.1.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.14.0, < 2.0.0dev\",\n \"grpc-google-iam-v1 >= 0.12.3, < 0.13dev\",\n 'enum34; python_version < \"3.4\"',\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "pubsub/setup.py"}]} | 1,205 | 138 |
gh_patches_debug_2406 | rasdani/github-patches | git_diff | buildbot__buildbot-3490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UnboundLocalError in mq/base.py on master shutdown
Hello,
We're using buildbot in multi-master mode and got this stacktrace on one of the master when shutting it down:
```
2017-07-17 12:33:29+0000 [-] Waiting for 1 build(s) to finish
2017-07-17 12:33:29+0000 [-] Builder <Builder 'u'sql-monitor-bitbucket_scality_ring-monitor_ring_frequent-prod-frontend-0'' at 140555339856784> has 1 builds running
2017-07-17 12:33:29+0000 [-] Not shutting down, there are 1 builds running
2017-07-17 12:33:29+0000 [-] Trying shutdown sequence again
2017-07-17 12:33:30+0000 [-] <Build sql-monitor-bitbucket_scality_ring-monitor_ring_frequent-prod-frontend-0 number:32108L results:exception>: stopping build: Master Shutdown 5
2017-07-17 12:33:30+0000 [-] Unhandled error in Deferred:
2017-07-17 12:33:30+0000 [-] Unhandled Error
Traceback (most recent call last):
File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks
result = g.send(result)
File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/process/botmaster.py", line 105, in cleanShutdown
l.append(build.waitUntilFinished())
File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/process/build.py", line 687, in waitUntilFinished
lambda: self.finished)
File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1445, in unwindGenerator
return _inlineCallbacks(None, gen, Deferred())
— <exception caught here> —
File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks
result = g.send(result)
File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/mq/base.py", line 40, in waitUntilEvent
defer.returnValue(res)
exceptions.UnboundLocalError: local variable 'res' referenced before assignment
```
Looking at the code at the end of `waitUntilEvent()`:
```
if not check:
res = yield d
yield buildCompleteConsumer.stopConsuming
defer.returnValue(res)
```
If the check returned false, we try to return a value (`res`) that was never defined.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/mq/base.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from __future__ import absolute_import
17 from __future__ import print_function
18
19 from twisted.internet import defer
20 from twisted.python import failure
21 from twisted.python import log
22
23 from buildbot.util import service
24
25
26 class MQBase(service.AsyncService):
27 name = 'mq-implementation'
28
29 @defer.inlineCallbacks
30 def waitUntilEvent(self, filter, check_callback):
31 d = defer.Deferred()
32 buildCompleteConsumer = yield self.startConsuming(
33 lambda key, value: d.callback((key, value)),
34 filter)
35 check = yield check_callback()
36 # we only wait if the check callback return true
37 if not check:
38 res = yield d
39 yield buildCompleteConsumer.stopConsuming
40 defer.returnValue(res)
41
42
43 class QueueRef(object):
44
45 __slots__ = ['callback']
46
47 def __init__(self, callback):
48 self.callback = callback
49
50 def invoke(self, routing_key, data):
51 if not self.callback:
52 return
53
54 try:
55 x = self.callback(routing_key, data)
56 except Exception:
57 log.err(failure.Failure(), 'while invoking %r' % (self.callback,))
58 return
59 if isinstance(x, defer.Deferred):
60 x.addErrback(log.err, 'while invoking %r' % (self.callback,))
61
62 def stopConsuming(self):
63 # subclasses should set self.callback to None in this method
64 raise NotImplementedError
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/master/buildbot/mq/base.py b/master/buildbot/mq/base.py
--- a/master/buildbot/mq/base.py
+++ b/master/buildbot/mq/base.py
@@ -36,7 +36,9 @@
# we only wait if the check callback return true
if not check:
res = yield d
- yield buildCompleteConsumer.stopConsuming
+ else:
+ res = None
+ yield buildCompleteConsumer.stopConsuming()
defer.returnValue(res)
| {"golden_diff": "diff --git a/master/buildbot/mq/base.py b/master/buildbot/mq/base.py\n--- a/master/buildbot/mq/base.py\n+++ b/master/buildbot/mq/base.py\n@@ -36,7 +36,9 @@\n # we only wait if the check callback return true\n if not check:\n res = yield d\n- yield buildCompleteConsumer.stopConsuming\n+ else:\n+ res = None\n+ yield buildCompleteConsumer.stopConsuming()\n defer.returnValue(res)\n", "issue": "UnboundLocalError in mq/base.py on master shutdown\nHello,\r\n\r\nWe're using buildbot in multi-master mode and got this stacktrace on one of the master when shutting it down:\r\n```\r\n2017-07-17 12:33:29+0000 [-] Waiting for 1 build(s) to finish\r\n2017-07-17 12:33:29+0000 [-] Builder <Builder 'u'sql-monitor-bitbucket_scality_ring-monitor_ring_frequent-prod-frontend-0'' at 140555339856784> has 1 builds running\r\n2017-07-17 12:33:29+0000 [-] Not shutting down, there are 1 builds running\r\n2017-07-17 12:33:29+0000 [-] Trying shutdown sequence again\r\n2017-07-17 12:33:30+0000 [-] <Build sql-monitor-bitbucket_scality_ring-monitor_ring_frequent-prod-frontend-0 number:32108L results:exception>: stopping build: Master Shutdown 5\r\n2017-07-17 12:33:30+0000 [-] Unhandled error in Deferred:\r\n2017-07-17 12:33:30+0000 [-] Unhandled Error\r\nTraceback (most recent call last):\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1299, in _inlineCallbacks\r\nresult = g.send(result)\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/process/botmaster.py\", line 105, in cleanShutdown\r\nl.append(build.waitUntilFinished())\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/process/build.py\", line 687, in waitUntilFinished\r\nlambda: self.finished)\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1445, in unwindGenerator\r\nreturn _inlineCallbacks(None, gen, Deferred())\r\n\u2014 <exception caught here> \u2014\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py\", line 1299, in _inlineCallbacks\r\nresult = g.send(result)\r\nFile \"/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/mq/base.py\", line 40, in waitUntilEvent\r\ndefer.returnValue(res)\r\nexceptions.UnboundLocalError: local variable 'res' referenced before assignment\r\n```\r\nLooking at the code at the end of `waitUntilEvent()`:\r\n```\r\n if not check:\r\n res = yield d\r\n yield buildCompleteConsumer.stopConsuming\r\n defer.returnValue(res)\r\n```\r\n\r\nIf the check returned false, we try to return a value (`res`) that was never defined.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\nfrom twisted.python import failure\nfrom twisted.python import log\n\nfrom buildbot.util import service\n\n\nclass MQBase(service.AsyncService):\n name = 'mq-implementation'\n\n @defer.inlineCallbacks\n def waitUntilEvent(self, filter, check_callback):\n d = defer.Deferred()\n buildCompleteConsumer = yield self.startConsuming(\n lambda key, value: d.callback((key, value)),\n filter)\n check = yield check_callback()\n # we only wait if the check callback return true\n if not check:\n res = yield d\n yield buildCompleteConsumer.stopConsuming\n defer.returnValue(res)\n\n\nclass QueueRef(object):\n\n __slots__ = ['callback']\n\n def __init__(self, callback):\n self.callback = callback\n\n def invoke(self, routing_key, data):\n if not self.callback:\n return\n\n try:\n x = self.callback(routing_key, data)\n except Exception:\n log.err(failure.Failure(), 'while invoking %r' % (self.callback,))\n return\n if isinstance(x, defer.Deferred):\n x.addErrback(log.err, 'while invoking %r' % (self.callback,))\n\n def stopConsuming(self):\n # subclasses should set self.callback to None in this method\n raise NotImplementedError\n", "path": "master/buildbot/mq/base.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\nfrom twisted.python import failure\nfrom twisted.python import log\n\nfrom buildbot.util import service\n\n\nclass MQBase(service.AsyncService):\n name = 'mq-implementation'\n\n @defer.inlineCallbacks\n def waitUntilEvent(self, filter, check_callback):\n d = defer.Deferred()\n buildCompleteConsumer = yield self.startConsuming(\n lambda key, value: d.callback((key, value)),\n filter)\n check = yield check_callback()\n # we only wait if the check callback return true\n if not check:\n res = yield d\n else:\n res = None\n yield buildCompleteConsumer.stopConsuming()\n defer.returnValue(res)\n\n\nclass QueueRef(object):\n\n __slots__ = ['callback']\n\n def __init__(self, callback):\n self.callback = callback\n\n def invoke(self, routing_key, data):\n if not self.callback:\n return\n\n try:\n x = self.callback(routing_key, data)\n except Exception:\n log.err(failure.Failure(), 'while invoking %r' % (self.callback,))\n return\n if isinstance(x, defer.Deferred):\n x.addErrback(log.err, 'while invoking %r' % (self.callback,))\n\n def stopConsuming(self):\n # subclasses should set self.callback to None in this method\n raise NotImplementedError\n", "path": "master/buildbot/mq/base.py"}]} | 1,566 | 110 |
gh_patches_debug_21185 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1852 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Your books: All books: shelved date is incorrect
**Describe the bug**
I just started using Bookwyrm, and added 4 books as "To Read". On my "All Books" page, the "Shelved" dates for 3 of those books are incorrect. https://bookwyrm.social/user/chorist/books
If I click over to my "To Read" page however, the Shelved dates are all correct (all showing "today").
**Screenshots**
<img width="1181" alt="Screen Shot 2022-01-18 at 4 52 23 PM" src="https://user-images.githubusercontent.com/557851/150031715-652dc082-a45a-4e71-af7f-efc34dfb0de9.png">
**Instance**
bookwyrm.social
**Desktop (please complete the following information):**
- OS: MacOS
- Browser: Safari
- Version 15.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/shelf/shelf.py`
Content:
```
1 """ shelf views """
2 from collections import namedtuple
3
4 from django.db.models import OuterRef, Subquery, F
5 from django.contrib.auth.decorators import login_required
6 from django.core.paginator import Paginator
7 from django.http import HttpResponseBadRequest
8 from django.shortcuts import get_object_or_404, redirect
9 from django.template.response import TemplateResponse
10 from django.utils.decorators import method_decorator
11 from django.utils.translation import gettext_lazy as _
12 from django.views import View
13
14 from bookwyrm import forms, models
15 from bookwyrm.activitypub import ActivitypubResponse
16 from bookwyrm.settings import PAGE_LENGTH
17 from bookwyrm.views.helpers import is_api_request, get_user_from_username
18
19
20 # pylint: disable=no-self-use
21 class Shelf(View):
22 """shelf page"""
23
24 def get(self, request, username, shelf_identifier=None):
25 """display a shelf"""
26 user = get_user_from_username(request.user, username)
27
28 is_self = user == request.user
29
30 if is_self:
31 shelves = user.shelf_set.all()
32 else:
33 shelves = models.Shelf.privacy_filter(request.user).filter(user=user).all()
34
35 # get the shelf and make sure the logged in user should be able to see it
36 if shelf_identifier:
37 shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)
38 shelf.raise_visible_to_user(request.user)
39 books = shelf.books
40 else:
41 # this is a constructed "all books" view, with a fake "shelf" obj
42 FakeShelf = namedtuple(
43 "Shelf", ("identifier", "name", "user", "books", "privacy")
44 )
45 books = (
46 models.Edition.viewer_aware_objects(request.user)
47 .filter(
48 # privacy is ensured because the shelves are already filtered above
49 shelfbook__shelf__in=shelves
50 )
51 .distinct()
52 )
53 shelf = FakeShelf("all", _("All books"), user, books, "public")
54
55 if is_api_request(request) and shelf_identifier:
56 return ActivitypubResponse(shelf.to_activity(**request.GET))
57
58 reviews = models.Review.objects
59 if not is_self:
60 reviews = models.Review.privacy_filter(request.user)
61
62 reviews = reviews.filter(
63 user=user,
64 rating__isnull=False,
65 book__id=OuterRef("id"),
66 deleted=False,
67 ).order_by("-published_date")
68
69 reading = models.ReadThrough.objects
70
71 reading = reading.filter(user=user, book__id=OuterRef("id")).order_by(
72 "start_date"
73 )
74
75 if shelf_identifier:
76 books = books.annotate(shelved_date=F("shelfbook__shelved_date"))
77 else:
78 # sorting by shelved date will cause duplicates in the "all books" view
79 books = books.annotate(shelved_date=F("updated_date"))
80 books = books.annotate(
81 rating=Subquery(reviews.values("rating")[:1]),
82 start_date=Subquery(reading.values("start_date")[:1]),
83 finish_date=Subquery(reading.values("finish_date")[:1]),
84 author=Subquery(
85 models.Book.objects.filter(id=OuterRef("id")).values("authors__name")[
86 :1
87 ]
88 ),
89 ).prefetch_related("authors")
90
91 books = sort_books(books, request.GET.get("sort"))
92
93 paginated = Paginator(
94 books,
95 PAGE_LENGTH,
96 )
97 page = paginated.get_page(request.GET.get("page"))
98 data = {
99 "user": user,
100 "is_self": is_self,
101 "shelves": shelves,
102 "shelf": shelf,
103 "books": page,
104 "edit_form": forms.ShelfForm(instance=shelf if shelf_identifier else None),
105 "create_form": forms.ShelfForm(),
106 "sort": request.GET.get("sort"),
107 "page_range": paginated.get_elided_page_range(
108 page.number, on_each_side=2, on_ends=1
109 ),
110 }
111
112 return TemplateResponse(request, "shelf/shelf.html", data)
113
114 @method_decorator(login_required, name="dispatch")
115 # pylint: disable=unused-argument
116 def post(self, request, username, shelf_identifier):
117 """edit a shelf"""
118 user = get_user_from_username(request.user, username)
119 shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)
120 shelf.raise_not_editable(request.user)
121
122 # you can't change the name of the default shelves
123 if not shelf.editable and request.POST.get("name") != shelf.name:
124 return HttpResponseBadRequest()
125
126 form = forms.ShelfForm(request.POST, instance=shelf)
127 if not form.is_valid():
128 return redirect(shelf.local_path)
129 shelf = form.save()
130 return redirect(shelf.local_path)
131
132
133 def sort_books(books, sort):
134 """Books in shelf sorting"""
135 sort_fields = [
136 "title",
137 "author",
138 "shelved_date",
139 "start_date",
140 "finish_date",
141 "rating",
142 ]
143
144 if sort in sort_fields:
145 books = books.order_by(sort)
146 elif sort and sort[1:] in sort_fields:
147 books = books.order_by(F(sort[1:]).desc(nulls_last=True))
148 else:
149 books = books.order_by("-shelved_date")
150 return books
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/shelf/shelf.py b/bookwyrm/views/shelf/shelf.py
--- a/bookwyrm/views/shelf/shelf.py
+++ b/bookwyrm/views/shelf/shelf.py
@@ -1,7 +1,7 @@
""" shelf views """
from collections import namedtuple
-from django.db.models import OuterRef, Subquery, F
+from django.db.models import OuterRef, Subquery, F, Max
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.http import HttpResponseBadRequest
@@ -72,11 +72,7 @@
"start_date"
)
- if shelf_identifier:
- books = books.annotate(shelved_date=F("shelfbook__shelved_date"))
- else:
- # sorting by shelved date will cause duplicates in the "all books" view
- books = books.annotate(shelved_date=F("updated_date"))
+ books = books.annotate(shelved_date=Max("shelfbook__shelved_date"))
books = books.annotate(
rating=Subquery(reviews.values("rating")[:1]),
start_date=Subquery(reading.values("start_date")[:1]),
| {"golden_diff": "diff --git a/bookwyrm/views/shelf/shelf.py b/bookwyrm/views/shelf/shelf.py\n--- a/bookwyrm/views/shelf/shelf.py\n+++ b/bookwyrm/views/shelf/shelf.py\n@@ -1,7 +1,7 @@\n \"\"\" shelf views \"\"\"\n from collections import namedtuple\n \n-from django.db.models import OuterRef, Subquery, F\n+from django.db.models import OuterRef, Subquery, F, Max\n from django.contrib.auth.decorators import login_required\n from django.core.paginator import Paginator\n from django.http import HttpResponseBadRequest\n@@ -72,11 +72,7 @@\n \"start_date\"\n )\n \n- if shelf_identifier:\n- books = books.annotate(shelved_date=F(\"shelfbook__shelved_date\"))\n- else:\n- # sorting by shelved date will cause duplicates in the \"all books\" view\n- books = books.annotate(shelved_date=F(\"updated_date\"))\n+ books = books.annotate(shelved_date=Max(\"shelfbook__shelved_date\"))\n books = books.annotate(\n rating=Subquery(reviews.values(\"rating\")[:1]),\n start_date=Subquery(reading.values(\"start_date\")[:1]),\n", "issue": "Your books: All books: shelved date is incorrect\n**Describe the bug**\r\n\r\nI just started using Bookwyrm, and added 4 books as \"To Read\". On my \"All Books\" page, the \"Shelved\" dates for 3 of those books are incorrect. https://bookwyrm.social/user/chorist/books\r\n\r\nIf I click over to my \"To Read\" page however, the Shelved dates are all correct (all showing \"today\").\r\n\r\n**Screenshots**\r\n\r\n<img width=\"1181\" alt=\"Screen Shot 2022-01-18 at 4 52 23 PM\" src=\"https://user-images.githubusercontent.com/557851/150031715-652dc082-a45a-4e71-af7f-efc34dfb0de9.png\">\r\n\r\n**Instance**\r\nbookwyrm.social\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS\r\n - Browser: Safari\r\n - Version 15.2\r\n\r\n\n", "before_files": [{"content": "\"\"\" shelf views \"\"\"\nfrom collections import namedtuple\n\nfrom django.db.models import OuterRef, Subquery, F\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.views.helpers import is_api_request, get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Shelf(View):\n \"\"\"shelf page\"\"\"\n\n def get(self, request, username, shelf_identifier=None):\n \"\"\"display a shelf\"\"\"\n user = get_user_from_username(request.user, username)\n\n is_self = user == request.user\n\n if is_self:\n shelves = user.shelf_set.all()\n else:\n shelves = models.Shelf.privacy_filter(request.user).filter(user=user).all()\n\n # get the shelf and make sure the logged in user should be able to see it\n if shelf_identifier:\n shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)\n shelf.raise_visible_to_user(request.user)\n books = shelf.books\n else:\n # this is a constructed \"all books\" view, with a fake \"shelf\" obj\n FakeShelf = namedtuple(\n \"Shelf\", (\"identifier\", \"name\", \"user\", \"books\", \"privacy\")\n )\n books = (\n models.Edition.viewer_aware_objects(request.user)\n .filter(\n # privacy is ensured because the shelves are already filtered above\n shelfbook__shelf__in=shelves\n )\n 
.distinct()\n )\n shelf = FakeShelf(\"all\", _(\"All books\"), user, books, \"public\")\n\n if is_api_request(request) and shelf_identifier:\n return ActivitypubResponse(shelf.to_activity(**request.GET))\n\n reviews = models.Review.objects\n if not is_self:\n reviews = models.Review.privacy_filter(request.user)\n\n reviews = reviews.filter(\n user=user,\n rating__isnull=False,\n book__id=OuterRef(\"id\"),\n deleted=False,\n ).order_by(\"-published_date\")\n\n reading = models.ReadThrough.objects\n\n reading = reading.filter(user=user, book__id=OuterRef(\"id\")).order_by(\n \"start_date\"\n )\n\n if shelf_identifier:\n books = books.annotate(shelved_date=F(\"shelfbook__shelved_date\"))\n else:\n # sorting by shelved date will cause duplicates in the \"all books\" view\n books = books.annotate(shelved_date=F(\"updated_date\"))\n books = books.annotate(\n rating=Subquery(reviews.values(\"rating\")[:1]),\n start_date=Subquery(reading.values(\"start_date\")[:1]),\n finish_date=Subquery(reading.values(\"finish_date\")[:1]),\n author=Subquery(\n models.Book.objects.filter(id=OuterRef(\"id\")).values(\"authors__name\")[\n :1\n ]\n ),\n ).prefetch_related(\"authors\")\n\n books = sort_books(books, request.GET.get(\"sort\"))\n\n paginated = Paginator(\n books,\n PAGE_LENGTH,\n )\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"user\": user,\n \"is_self\": is_self,\n \"shelves\": shelves,\n \"shelf\": shelf,\n \"books\": page,\n \"edit_form\": forms.ShelfForm(instance=shelf if shelf_identifier else None),\n \"create_form\": forms.ShelfForm(),\n \"sort\": request.GET.get(\"sort\"),\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n\n return TemplateResponse(request, \"shelf/shelf.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request, username, shelf_identifier):\n \"\"\"edit a shelf\"\"\"\n user = get_user_from_username(request.user, username)\n shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)\n shelf.raise_not_editable(request.user)\n\n # you can't change the name of the default shelves\n if not shelf.editable and request.POST.get(\"name\") != shelf.name:\n return HttpResponseBadRequest()\n\n form = forms.ShelfForm(request.POST, instance=shelf)\n if not form.is_valid():\n return redirect(shelf.local_path)\n shelf = form.save()\n return redirect(shelf.local_path)\n\n\ndef sort_books(books, sort):\n \"\"\"Books in shelf sorting\"\"\"\n sort_fields = [\n \"title\",\n \"author\",\n \"shelved_date\",\n \"start_date\",\n \"finish_date\",\n \"rating\",\n ]\n\n if sort in sort_fields:\n books = books.order_by(sort)\n elif sort and sort[1:] in sort_fields:\n books = books.order_by(F(sort[1:]).desc(nulls_last=True))\n else:\n books = books.order_by(\"-shelved_date\")\n return books\n", "path": "bookwyrm/views/shelf/shelf.py"}], "after_files": [{"content": "\"\"\" shelf views \"\"\"\nfrom collections import namedtuple\n\nfrom django.db.models import OuterRef, Subquery, F, Max\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.activitypub 
import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.views.helpers import is_api_request, get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Shelf(View):\n \"\"\"shelf page\"\"\"\n\n def get(self, request, username, shelf_identifier=None):\n \"\"\"display a shelf\"\"\"\n user = get_user_from_username(request.user, username)\n\n is_self = user == request.user\n\n if is_self:\n shelves = user.shelf_set.all()\n else:\n shelves = models.Shelf.privacy_filter(request.user).filter(user=user).all()\n\n # get the shelf and make sure the logged in user should be able to see it\n if shelf_identifier:\n shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)\n shelf.raise_visible_to_user(request.user)\n books = shelf.books\n else:\n # this is a constructed \"all books\" view, with a fake \"shelf\" obj\n FakeShelf = namedtuple(\n \"Shelf\", (\"identifier\", \"name\", \"user\", \"books\", \"privacy\")\n )\n books = (\n models.Edition.viewer_aware_objects(request.user)\n .filter(\n # privacy is ensured because the shelves are already filtered above\n shelfbook__shelf__in=shelves\n )\n .distinct()\n )\n shelf = FakeShelf(\"all\", _(\"All books\"), user, books, \"public\")\n\n if is_api_request(request) and shelf_identifier:\n return ActivitypubResponse(shelf.to_activity(**request.GET))\n\n reviews = models.Review.objects\n if not is_self:\n reviews = models.Review.privacy_filter(request.user)\n\n reviews = reviews.filter(\n user=user,\n rating__isnull=False,\n book__id=OuterRef(\"id\"),\n deleted=False,\n ).order_by(\"-published_date\")\n\n reading = models.ReadThrough.objects\n\n reading = reading.filter(user=user, book__id=OuterRef(\"id\")).order_by(\n \"start_date\"\n )\n\n books = books.annotate(shelved_date=Max(\"shelfbook__shelved_date\"))\n books = books.annotate(\n rating=Subquery(reviews.values(\"rating\")[:1]),\n start_date=Subquery(reading.values(\"start_date\")[:1]),\n finish_date=Subquery(reading.values(\"finish_date\")[:1]),\n author=Subquery(\n models.Book.objects.filter(id=OuterRef(\"id\")).values(\"authors__name\")[\n :1\n ]\n ),\n ).prefetch_related(\"authors\")\n\n books = sort_books(books, request.GET.get(\"sort\"))\n\n paginated = Paginator(\n books,\n PAGE_LENGTH,\n )\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"user\": user,\n \"is_self\": is_self,\n \"shelves\": shelves,\n \"shelf\": shelf,\n \"books\": page,\n \"edit_form\": forms.ShelfForm(instance=shelf if shelf_identifier else None),\n \"create_form\": forms.ShelfForm(),\n \"sort\": request.GET.get(\"sort\"),\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n\n return TemplateResponse(request, \"shelf/shelf.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request, username, shelf_identifier):\n \"\"\"edit a shelf\"\"\"\n user = get_user_from_username(request.user, username)\n shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)\n shelf.raise_not_editable(request.user)\n\n # you can't change the name of the default shelves\n if not shelf.editable and request.POST.get(\"name\") != shelf.name:\n return HttpResponseBadRequest()\n\n form = forms.ShelfForm(request.POST, instance=shelf)\n if not form.is_valid():\n return redirect(shelf.local_path)\n shelf = form.save()\n return redirect(shelf.local_path)\n\n\ndef sort_books(books, sort):\n \"\"\"Books in shelf sorting\"\"\"\n sort_fields = [\n \"title\",\n 
\"author\",\n \"shelved_date\",\n \"start_date\",\n \"finish_date\",\n \"rating\",\n ]\n\n if sort in sort_fields:\n books = books.order_by(sort)\n elif sort and sort[1:] in sort_fields:\n books = books.order_by(F(sort[1:]).desc(nulls_last=True))\n else:\n books = books.order_by(\"-shelved_date\")\n return books\n", "path": "bookwyrm/views/shelf/shelf.py"}]} | 1,989 | 261 |
gh_patches_debug_57017 | rasdani/github-patches | git_diff | fidals__shopelectro-995 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Resolve stuck tests
CI fails because of stuck tests. They are working at the local and relevant code looks like they should pass
https://ci.fidals.com/fidals/shopelectro/1727/9
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/settings/drone.py`
Content:
```
1 """Settings especially for drone CI."""
2
3 from .base import *
4
5
6 DEBUG = True
7
8 # http://bit.ly/sorl-thumbnail-docs
9 THUMBNAIL_DEBUG = True
10
11 SITE_DOMAIN_NAME = 'stage.shopelectro.ru'
12
13 YANDEX_KASSA_LINK = 'https://demomoney.yandex.ru/eshop.xml'
14
15 SELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://selenium:4444/wd/hub')
16 SELENIUM_WAIT_SECONDS = int(os.environ['SELENIUM_WAIT_SECONDS'])
17 SELENIUM_TIMEOUT_SECONDS = int(os.environ['SELENIUM_TIMEOUT_SECONDS'])
18 SELENIUM_IMPLICIT_WAIT = int(os.environ['SELENIUM_IMPLICIT_WAIT'])
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/settings/drone.py b/shopelectro/settings/drone.py
--- a/shopelectro/settings/drone.py
+++ b/shopelectro/settings/drone.py
@@ -5,6 +5,15 @@
DEBUG = True
+# Header categories menu uses cache in templates.
+# Disable cache to avoid stale menu testing.
+# See #991 for details.
+CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
+ }
+}
+
# http://bit.ly/sorl-thumbnail-docs
THUMBNAIL_DEBUG = True
| {"golden_diff": "diff --git a/shopelectro/settings/drone.py b/shopelectro/settings/drone.py\n--- a/shopelectro/settings/drone.py\n+++ b/shopelectro/settings/drone.py\n@@ -5,6 +5,15 @@\n \n DEBUG = True\n \n+# Header categories menu uses cache in templates.\n+# Disable cache to avoid stale menu testing.\n+# See #991 for details.\n+CACHES = {\n+ 'default': {\n+ 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n+ }\n+}\n+\n # http://bit.ly/sorl-thumbnail-docs\n THUMBNAIL_DEBUG = True\n", "issue": "Resolve stuck tests\nCI fails because of stuck tests. They are working at the local and relevant code looks like they should pass\r\nhttps://ci.fidals.com/fidals/shopelectro/1727/9\n", "before_files": [{"content": "\"\"\"Settings especially for drone CI.\"\"\"\n\nfrom .base import *\n\n\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = True\n\nSITE_DOMAIN_NAME = 'stage.shopelectro.ru'\n\nYANDEX_KASSA_LINK = 'https://demomoney.yandex.ru/eshop.xml'\n\nSELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://selenium:4444/wd/hub')\nSELENIUM_WAIT_SECONDS = int(os.environ['SELENIUM_WAIT_SECONDS'])\nSELENIUM_TIMEOUT_SECONDS = int(os.environ['SELENIUM_TIMEOUT_SECONDS'])\nSELENIUM_IMPLICIT_WAIT = int(os.environ['SELENIUM_IMPLICIT_WAIT'])\n", "path": "shopelectro/settings/drone.py"}], "after_files": [{"content": "\"\"\"Settings especially for drone CI.\"\"\"\n\nfrom .base import *\n\n\nDEBUG = True\n\n# Header categories menu uses cache in templates.\n# Disable cache to avoid stale menu testing.\n# See #991 for details.\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n }\n}\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = True\n\nSITE_DOMAIN_NAME = 'stage.shopelectro.ru'\n\nYANDEX_KASSA_LINK = 'https://demomoney.yandex.ru/eshop.xml'\n\nSELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://selenium:4444/wd/hub')\nSELENIUM_WAIT_SECONDS = int(os.environ['SELENIUM_WAIT_SECONDS'])\nSELENIUM_TIMEOUT_SECONDS = int(os.environ['SELENIUM_TIMEOUT_SECONDS'])\nSELENIUM_IMPLICIT_WAIT = int(os.environ['SELENIUM_IMPLICIT_WAIT'])\n", "path": "shopelectro/settings/drone.py"}]} | 503 | 141 |
gh_patches_debug_18393 | rasdani/github-patches | git_diff | tensorflow__addons-834 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add nightly tests for windows/macos
Currently we only test our nightlies on linux:
https://github.com/tensorflow/addons/blob/master/.travis.yml#L17
It should be relatively simple to enable tests for macos/windows, with the one caveat that `tf-nightly` is not published for windows.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/losses/__init__.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Additional losses that conform to Keras API."""
16
17 from __future__ import absolute_import
18 from __future__ import division
19 from __future__ import print_function
20
21 from tensorflow_addons.losses.contrastive import contrastive_loss, ContrastiveLoss
22 from tensorflow_addons.losses.focal_loss import sigmoid_focal_crossentropy, SigmoidFocalCrossEntropy
23 from tensorflow_addons.losses.giou_loss import giou_loss, GIoULoss
24 from tensorflow_addons.losses.lifted import lifted_struct_loss, LiftedStructLoss
25 from tensorflow_addons.losses.npairs import npairs_loss, NpairsLoss, npairs_multilabel_loss, NpairsMultilabelLoss
26 from tensorflow_addons.losses.sparsemax_loss import sparsemax_loss, SparsemaxLoss
27 from tensorflow_addons.losses.triplet import triplet_semihard_loss, TripletSemiHardLoss
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensorflow_addons/losses/__init__.py b/tensorflow_addons/losses/__init__.py
--- a/tensorflow_addons/losses/__init__.py
+++ b/tensorflow_addons/losses/__init__.py
@@ -22,6 +22,11 @@
from tensorflow_addons.losses.focal_loss import sigmoid_focal_crossentropy, SigmoidFocalCrossEntropy
from tensorflow_addons.losses.giou_loss import giou_loss, GIoULoss
from tensorflow_addons.losses.lifted import lifted_struct_loss, LiftedStructLoss
-from tensorflow_addons.losses.npairs import npairs_loss, NpairsLoss, npairs_multilabel_loss, NpairsMultilabelLoss
from tensorflow_addons.losses.sparsemax_loss import sparsemax_loss, SparsemaxLoss
from tensorflow_addons.losses.triplet import triplet_semihard_loss, TripletSemiHardLoss
+
+# Temporarily disable for windows
+# Remove after: https://github.com/tensorflow/addons/issues/838
+import os
+if os.name != 'nt':
+ from tensorflow_addons.losses.npairs import npairs_loss, NpairsLoss, npairs_multilabel_loss, NpairsMultilabelLoss
| {"golden_diff": "diff --git a/tensorflow_addons/losses/__init__.py b/tensorflow_addons/losses/__init__.py\n--- a/tensorflow_addons/losses/__init__.py\n+++ b/tensorflow_addons/losses/__init__.py\n@@ -22,6 +22,11 @@\n from tensorflow_addons.losses.focal_loss import sigmoid_focal_crossentropy, SigmoidFocalCrossEntropy\n from tensorflow_addons.losses.giou_loss import giou_loss, GIoULoss\n from tensorflow_addons.losses.lifted import lifted_struct_loss, LiftedStructLoss\n-from tensorflow_addons.losses.npairs import npairs_loss, NpairsLoss, npairs_multilabel_loss, NpairsMultilabelLoss\n from tensorflow_addons.losses.sparsemax_loss import sparsemax_loss, SparsemaxLoss\n from tensorflow_addons.losses.triplet import triplet_semihard_loss, TripletSemiHardLoss\n+\n+# Temporarily disable for windows\n+# Remove after: https://github.com/tensorflow/addons/issues/838\n+import os\n+if os.name != 'nt':\n+ from tensorflow_addons.losses.npairs import npairs_loss, NpairsLoss, npairs_multilabel_loss, NpairsMultilabelLoss\n", "issue": "Add nightly tests for windows/macos\nCurrently we only test our nightlies on linux:\r\nhttps://github.com/tensorflow/addons/blob/master/.travis.yml#L17\r\n\r\nIt should be relatively simple to enable tests for macos/windows, with the one caveat that `tf-nightly` is not published for windows. \n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Additional losses that conform to Keras API.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow_addons.losses.contrastive import contrastive_loss, ContrastiveLoss\nfrom tensorflow_addons.losses.focal_loss import sigmoid_focal_crossentropy, SigmoidFocalCrossEntropy\nfrom tensorflow_addons.losses.giou_loss import giou_loss, GIoULoss\nfrom tensorflow_addons.losses.lifted import lifted_struct_loss, LiftedStructLoss\nfrom tensorflow_addons.losses.npairs import npairs_loss, NpairsLoss, npairs_multilabel_loss, NpairsMultilabelLoss\nfrom tensorflow_addons.losses.sparsemax_loss import sparsemax_loss, SparsemaxLoss\nfrom tensorflow_addons.losses.triplet import triplet_semihard_loss, TripletSemiHardLoss\n", "path": "tensorflow_addons/losses/__init__.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Additional losses that conform to Keras API.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow_addons.losses.contrastive import contrastive_loss, ContrastiveLoss\nfrom tensorflow_addons.losses.focal_loss import sigmoid_focal_crossentropy, SigmoidFocalCrossEntropy\nfrom tensorflow_addons.losses.giou_loss import giou_loss, GIoULoss\nfrom tensorflow_addons.losses.lifted import lifted_struct_loss, LiftedStructLoss\nfrom tensorflow_addons.losses.sparsemax_loss import sparsemax_loss, SparsemaxLoss\nfrom tensorflow_addons.losses.triplet import triplet_semihard_loss, TripletSemiHardLoss\n\n# Temporarily disable for windows\n# Remove after: https://github.com/tensorflow/addons/issues/838\nimport os\nif os.name != 'nt':\n from tensorflow_addons.losses.npairs import npairs_loss, NpairsLoss, npairs_multilabel_loss, NpairsMultilabelLoss\n", "path": "tensorflow_addons/losses/__init__.py"}]} | 701 | 275 |
gh_patches_debug_2355 | rasdani/github-patches | git_diff | pytorch__text-248 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A batch object created by fromvars does not have "fields" attribute
When making a batch object, the value of the `fields` attribute is set in its `__init__` method.
However, when created with `fromvars` class method, `fields` attribute is not set since the method first creates an empty object and then add information.
It should be modified to be analogous with the one created by `__init__` method.
It can be simply done by adding the following after https://github.com/pytorch/text/blob/master/torchtext/data/batch.py#L36:
```
batch.fields = dataset.fields.keys()
```
This kind of object creation is found when using BPTT iterator. Without `fields` attribute, printing a batch object is not possible due to https://github.com/pytorch/text/blob/master/torchtext/data/batch.py#L49.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/data/batch.py`
Content:
```
1 from torch import typename
2 from torch.tensor import _TensorBase
3
4
5 class Batch(object):
6 """Defines a batch of examples along with its Fields.
7
8 Attributes:
9 batch_size: Number of examples in the batch.
10 dataset: A reference to the dataset object the examples come from
11 (which itself contains the dataset's Field objects).
12 train: Whether the batch is from a training set.
13
14 Also stores the Variable for each column in the batch as an attribute.
15 """
16
17 def __init__(self, data=None, dataset=None, device=None, train=True):
18 """Create a Batch from a list of examples."""
19 if data is not None:
20 self.batch_size = len(data)
21 self.dataset = dataset
22 self.train = train
23 self.fields = dataset.fields.keys() # copy field names
24
25 for (name, field) in dataset.fields.items():
26 if field is not None:
27 batch = [x.__dict__[name] for x in data]
28 setattr(self, name, field.process(batch, device=device, train=train))
29
30 @classmethod
31 def fromvars(cls, dataset, batch_size, train=True, **kwargs):
32 """Create a Batch directly from a number of Variables."""
33 batch = cls()
34 batch.batch_size = batch_size
35 batch.dataset = dataset
36 batch.train = train
37 for k, v in kwargs.items():
38 setattr(batch, k, v)
39 return batch
40
41 def __repr__(self):
42 return str(self)
43
44 def __str__(self):
45 if not self.__dict__:
46 return 'Empty {} instance'.format(typename(self))
47
48 var_strs = '\n'.join(['\t[.' + name + ']' + ":" + _short_str(getattr(self, name))
49 for name in self.fields if hasattr(self, name)])
50
51 data_str = (' from {}'.format(self.dataset.name.upper())
52 if hasattr(self.dataset, 'name') and
53 isinstance(self.dataset.name, str) else '')
54
55 strt = '[{} of size {}{}]\n{}'.format(typename(self),
56 self.batch_size, data_str, var_strs)
57 return '\n' + strt
58
59 def __len__(self):
60 return self.batch_size
61
62
63 def _short_str(tensor):
64 # unwrap variable to tensor
65 if hasattr(tensor, 'data'):
66 tensor = tensor.data
67
68 # fallback in case of wrong argument type
69 if issubclass(type(tensor), _TensorBase) is False:
70 return str(tensor)
71
72 # copied from torch _tensor_str
73 size_str = 'x'.join(str(size) for size in tensor.size())
74 device_str = '' if not tensor.is_cuda else \
75 ' (GPU {})'.format(tensor.get_device())
76 strt = '[{} of size {}{}]'.format(typename(tensor),
77 size_str, device_str)
78 return strt
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchtext/data/batch.py b/torchtext/data/batch.py
--- a/torchtext/data/batch.py
+++ b/torchtext/data/batch.py
@@ -34,6 +34,7 @@
batch.batch_size = batch_size
batch.dataset = dataset
batch.train = train
+ batch.fields = dataset.fields.keys()
for k, v in kwargs.items():
setattr(batch, k, v)
return batch
| {"golden_diff": "diff --git a/torchtext/data/batch.py b/torchtext/data/batch.py\n--- a/torchtext/data/batch.py\n+++ b/torchtext/data/batch.py\n@@ -34,6 +34,7 @@\n batch.batch_size = batch_size\n batch.dataset = dataset\n batch.train = train\n+ batch.fields = dataset.fields.keys()\n for k, v in kwargs.items():\n setattr(batch, k, v)\n return batch\n", "issue": "A batch object created by fromvars does not have \"fields\" attribute\nWhen making a batch object, the value of the `fields` attribute is set in its `__init__` method.\r\nHowever, when created with `fromvars` class method, `fields` attribute is not set since the method first creates an empty object and then add information.\r\nIt should be modified to be analogous with the one created by `__init__` method.\r\nIt can be simply done by adding the following after https://github.com/pytorch/text/blob/master/torchtext/data/batch.py#L36:\r\n```\r\nbatch.fields = dataset.fields.keys()\r\n```\r\n\r\nThis kind of object creation is found when using BPTT iterator. Without `fields` attribute, printing a batch object is not possible due to https://github.com/pytorch/text/blob/master/torchtext/data/batch.py#L49.\n", "before_files": [{"content": "from torch import typename\nfrom torch.tensor import _TensorBase\n\n\nclass Batch(object):\n \"\"\"Defines a batch of examples along with its Fields.\n\n Attributes:\n batch_size: Number of examples in the batch.\n dataset: A reference to the dataset object the examples come from\n (which itself contains the dataset's Field objects).\n train: Whether the batch is from a training set.\n\n Also stores the Variable for each column in the batch as an attribute.\n \"\"\"\n\n def __init__(self, data=None, dataset=None, device=None, train=True):\n \"\"\"Create a Batch from a list of examples.\"\"\"\n if data is not None:\n self.batch_size = len(data)\n self.dataset = dataset\n self.train = train\n self.fields = dataset.fields.keys() # copy field names\n\n for (name, field) in dataset.fields.items():\n if field is not None:\n batch = [x.__dict__[name] for x in data]\n setattr(self, name, field.process(batch, device=device, train=train))\n\n @classmethod\n def fromvars(cls, dataset, batch_size, train=True, **kwargs):\n \"\"\"Create a Batch directly from a number of Variables.\"\"\"\n batch = cls()\n batch.batch_size = batch_size\n batch.dataset = dataset\n batch.train = train\n for k, v in kwargs.items():\n setattr(batch, k, v)\n return batch\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n if not self.__dict__:\n return 'Empty {} instance'.format(typename(self))\n\n var_strs = '\\n'.join(['\\t[.' 
+ name + ']' + \":\" + _short_str(getattr(self, name))\n for name in self.fields if hasattr(self, name)])\n\n data_str = (' from {}'.format(self.dataset.name.upper())\n if hasattr(self.dataset, 'name') and\n isinstance(self.dataset.name, str) else '')\n\n strt = '[{} of size {}{}]\\n{}'.format(typename(self),\n self.batch_size, data_str, var_strs)\n return '\\n' + strt\n\n def __len__(self):\n return self.batch_size\n\n\ndef _short_str(tensor):\n # unwrap variable to tensor\n if hasattr(tensor, 'data'):\n tensor = tensor.data\n\n # fallback in case of wrong argument type\n if issubclass(type(tensor), _TensorBase) is False:\n return str(tensor)\n\n # copied from torch _tensor_str\n size_str = 'x'.join(str(size) for size in tensor.size())\n device_str = '' if not tensor.is_cuda else \\\n ' (GPU {})'.format(tensor.get_device())\n strt = '[{} of size {}{}]'.format(typename(tensor),\n size_str, device_str)\n return strt\n", "path": "torchtext/data/batch.py"}], "after_files": [{"content": "from torch import typename\nfrom torch.tensor import _TensorBase\n\n\nclass Batch(object):\n \"\"\"Defines a batch of examples along with its Fields.\n\n Attributes:\n batch_size: Number of examples in the batch.\n dataset: A reference to the dataset object the examples come from\n (which itself contains the dataset's Field objects).\n train: Whether the batch is from a training set.\n\n Also stores the Variable for each column in the batch as an attribute.\n \"\"\"\n\n def __init__(self, data=None, dataset=None, device=None, train=True):\n \"\"\"Create a Batch from a list of examples.\"\"\"\n if data is not None:\n self.batch_size = len(data)\n self.dataset = dataset\n self.train = train\n self.fields = dataset.fields.keys() # copy field names\n\n for (name, field) in dataset.fields.items():\n if field is not None:\n batch = [x.__dict__[name] for x in data]\n setattr(self, name, field.process(batch, device=device, train=train))\n\n @classmethod\n def fromvars(cls, dataset, batch_size, train=True, **kwargs):\n \"\"\"Create a Batch directly from a number of Variables.\"\"\"\n batch = cls()\n batch.batch_size = batch_size\n batch.dataset = dataset\n batch.train = train\n batch.fields = dataset.fields.keys()\n for k, v in kwargs.items():\n setattr(batch, k, v)\n return batch\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n if not self.__dict__:\n return 'Empty {} instance'.format(typename(self))\n\n var_strs = '\\n'.join(['\\t[.' + name + ']' + \":\" + _short_str(getattr(self, name))\n for name in self.fields if hasattr(self, name)])\n\n data_str = (' from {}'.format(self.dataset.name.upper())\n if hasattr(self.dataset, 'name') and\n isinstance(self.dataset.name, str) else '')\n\n strt = '[{} of size {}{}]\\n{}'.format(typename(self),\n self.batch_size, data_str, var_strs)\n return '\\n' + strt\n\n def __len__(self):\n return self.batch_size\n\n\ndef _short_str(tensor):\n # unwrap variable to tensor\n if hasattr(tensor, 'data'):\n tensor = tensor.data\n\n # fallback in case of wrong argument type\n if issubclass(type(tensor), _TensorBase) is False:\n return str(tensor)\n\n # copied from torch _tensor_str\n size_str = 'x'.join(str(size) for size in tensor.size())\n device_str = '' if not tensor.is_cuda else \\\n ' (GPU {})'.format(tensor.get_device())\n strt = '[{} of size {}{}]'.format(typename(tensor),\n size_str, device_str)\n return strt\n", "path": "torchtext/data/batch.py"}]} | 1,203 | 102 |
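A note on the torchtext record above: `Batch.__str__` iterates over `self.fields`, so a batch built through `fromvars` (the path the BPTT iterator uses) failed on printing until the patch set that attribute. The sketch below imitates the pattern with plain stand-in classes rather than the real torchtext ones, purely to illustrate the before/after behaviour; the class names are invented for the example.

```python
# Stand-in for the torchtext Batch/Dataset pair; no torchtext import needed.
class MiniDataset:
    fields = {'text': None, 'target': None}

class MiniBatch:
    @classmethod
    def fromvars(cls, dataset, batch_size, **kwargs):
        batch = cls()
        batch.batch_size = batch_size
        batch.dataset = dataset
        batch.fields = dataset.fields.keys()   # the line the patch adds
        for k, v in kwargs.items():
            setattr(batch, k, v)
        return batch

    def __str__(self):
        # Mirrors Batch.__str__, which loops over self.fields.
        return '[MiniBatch of size {}] fields: {}'.format(
            self.batch_size, ', '.join(self.fields))

print(MiniBatch.fromvars(MiniDataset(), batch_size=32))
# Without the added line, __str__ would raise AttributeError for 'fields'.
```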
gh_patches_debug_12042 | rasdani/github-patches | git_diff | pytorch__pytorch-4563 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RuntimeError: Unsupported op descriptor: thnn_batch_norm_forward-5-eps-momentum-training. File a bug report.
I started getting this with the latest code while using JIT:
RuntimeError: Unsupported op descriptor: thnn_batch_norm_forward-5-eps-momentum-training. File a bug report.
It did not happen 7 days ago and still does not happen if I roll back pytorch to the version I had 7 days ago. Do you need any simplified test case to fix it?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/jit/gen_jit_dispatch.py`
Content:
```
1 import os
2 import argparse
3 from itertools import count
4 from ..autograd.utils import CodeTemplate, write
5 from ..autograd.gen_autograd import load_aten_declarations
6
7 template_path = os.path.join(os.path.dirname(__file__), 'templates')
8
9 ATEN_DISPATCH_H = CodeTemplate.from_file(template_path + '/aten_dispatch.h')
10 ATEN_DISPATCH_CPP = CodeTemplate.from_file(template_path + '/aten_dispatch.cpp')
11
12 ATTR_METHOD_MAP = {
13 'int64_t': 'i',
14 'IntList': 'is',
15 'Scalar': 't',
16 'bool': 'i',
17 'double': 'f',
18 'std::array<bool,2>': 'is',
19 'std::array<bool,3>': 'is',
20 }
21
22 TYPE_CASTS = {
23 'std::array<bool,2>': 'as_bool_array<2>',
24 'std::array<bool,3>': 'as_bool_array<3>',
25 'Scalar': 'Scalar',
26 'IntList': 'std::vector<int64_t>',
27 }
28
29 ATTR_ASSIGNMENT = CodeTemplate("""\
30 auto ${name} = ${type_cast}(node->${method}(stringToSymbol("${name}")));\
31 """)
32
33 CALL_NAMESPACE = CodeTemplate("at::${name}(${args})")
34 CALL_METHOD = CodeTemplate("TensorTemporary(inputs[0]).value().${name}(${args})")
35
36 CONSTRUCTOR = CodeTemplate("""\
37 {"${descriptor}", [](Node *node) {
38 ${assignments}
39 return TensorOp([=](const list_of_retainable & inputs,
40 list_of_retainable & outputs) {
41 autograd::profiler::RecordFunction record("${name}");
42 AutoGPU device_guard(deviceForInputs(inputs));
43 pack_list(outputs, ${call});
44 }, "${name}", ${num_inputs});
45 }},
46 """)
47
48
49 def is_jit_op(decl):
50 return (not decl['api_name'].endswith('_') and
51 not decl['name'].endswith('_out') and
52 not decl['name'].endswith('_forward') and
53 not any(arg['simple_type'] == 'Generator' for arg in decl['arguments']) and
54 not any(arg['simple_type'] == 'SparseTensor' for arg in decl['arguments']) and
55 not any(arg['simple_type'] == 'Storage' for arg in decl['arguments']) and
56 any(arg['simple_type'] in {'Tensor', 'TensorList'} for arg in decl['arguments']) and
57 'Tensor' in decl['return_type'])
58
59
60 def gen_jit_dispatch(declarations, out):
61 aten_decls = load_aten_declarations(declarations)
62 jit_decls = [d for d in aten_decls if is_jit_op(d)]
63
64 def is_tensor_arg(arg):
65 return arg['simple_type'] in {'Tensor', 'TensorList'}
66
67 ops = {}
68 for decl in jit_decls:
69 arguments = decl['arguments']
70 name = decl['name']
71 scalar_args = [arg for arg in arguments if not is_tensor_arg(arg)]
72 has_tensorlist = any(arg['simple_type'] == 'TensorList' for arg in arguments)
73
74 # Descriptor is a unique identified for a particular overload of an op
75 attr_names = sorted([arg['name'] for arg in scalar_args])
76 num_inputs = len(arguments) - len(scalar_args) if not has_tensorlist else "*"
77 descriptor = '-'.join([decl['name'], str(num_inputs)] + attr_names)
78
79 # All scalar args need to be assigned, so they can be captured by a lambda
80 assignments = [ATTR_ASSIGNMENT.substitute(type=arg['simple_type'],
81 type_cast=TYPE_CASTS.get(arg['simple_type'], arg['simple_type']),
82 name=arg['name'],
83 method=ATTR_METHOD_MAP[arg['simple_type']])
84 for arg in scalar_args]
85
86 # Generate the actuall ATen call. This gets a bit tricky because of
87 # TensorList arguments, and functions that are only available as methods.
88 if 'namespace' in decl['method_of']:
89 if has_tensorlist:
90 if sum(map(is_tensor_arg, arguments)) != 1:
91 # TODO: support this
92 continue
93 args = ['TensorTemporaryList(inputs)' if is_tensor_arg(arg) else arg['name']
94 for arg in arguments]
95 else:
96 tensor_id = iter(count(start=0))
97 args = ['TensorTemporary(inputs[{}]).value()'.format(
98 next(tensor_id)) if is_tensor_arg(arg) else arg['name']
99 for arg in arguments]
100 call = CALL_NAMESPACE.substitute(name=name, args=args)
101 else:
102 tensor_id = iter(count(start=1))
103 args = ['TensorTemporary(inputs[{}]).value()'.format(next(tensor_id)) if is_tensor_arg(arg) else arg['name']
104 for arg in arguments[1:]]
105 call = CALL_METHOD.substitute(name=name, args=args)
106
107 constructor = CONSTRUCTOR.substitute(descriptor=descriptor, name=name, call=call,
108 assignments=assignments,
109 # num_inputs is only used in AutogradClosure, which
110 # is going to be removed soon anyway. There's no good value
111 # we can provide for cat.
112 num_inputs=num_inputs if num_inputs != "*" else 0)
113 assert descriptor not in ops, descriptor
114 ops[descriptor] = constructor
115
116 # Sort the generated snippets to ensure that the generation is deterministic
117 env = {'constructors': sorted(list(ops.values()))}
118 write(out, 'aten_dispatch.h', ATEN_DISPATCH_H, env)
119 write(out, 'aten_dispatch.cpp', ATEN_DISPATCH_CPP, env)
120
121
122 def main():
123 parser = argparse.ArgumentParser(
124 description='Generate JIT op dispatch')
125 parser.add_argument('declarations', metavar='DECL',
126 help='path to Declarations.yaml')
127 parser.add_argument('out', metavar='OUT',
128 help='path to output directory')
129 args = parser.parse_args()
130 gen_jit_dispatch(args.declarations, args.out)
131
132
133 if __name__ == '__main__':
134 main()
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/jit/gen_jit_dispatch.py b/tools/jit/gen_jit_dispatch.py
--- a/tools/jit/gen_jit_dispatch.py
+++ b/tools/jit/gen_jit_dispatch.py
@@ -49,7 +49,6 @@
def is_jit_op(decl):
return (not decl['api_name'].endswith('_') and
not decl['name'].endswith('_out') and
- not decl['name'].endswith('_forward') and
not any(arg['simple_type'] == 'Generator' for arg in decl['arguments']) and
not any(arg['simple_type'] == 'SparseTensor' for arg in decl['arguments']) and
not any(arg['simple_type'] == 'Storage' for arg in decl['arguments']) and
| {"golden_diff": "diff --git a/tools/jit/gen_jit_dispatch.py b/tools/jit/gen_jit_dispatch.py\n--- a/tools/jit/gen_jit_dispatch.py\n+++ b/tools/jit/gen_jit_dispatch.py\n@@ -49,7 +49,6 @@\n def is_jit_op(decl):\n return (not decl['api_name'].endswith('_') and\n not decl['name'].endswith('_out') and\n- not decl['name'].endswith('_forward') and\n not any(arg['simple_type'] == 'Generator' for arg in decl['arguments']) and\n not any(arg['simple_type'] == 'SparseTensor' for arg in decl['arguments']) and\n not any(arg['simple_type'] == 'Storage' for arg in decl['arguments']) and\n", "issue": "RuntimeError: Unsupported op descriptor: thnn_batch_norm_forward-5-eps-momentum-training. File a bug report.\nI started getting this with the latest code while using JIT:\r\n\r\nRuntimeError: Unsupported op descriptor: thnn_batch_norm_forward-5-eps-momentum-training. File a bug report.\r\n\r\nIt did not happen 7 days ago and still does not happen if I roll back pytorch to the version I had 7 days ago. Do you need any simplified test case to fix it?\r\n \n", "before_files": [{"content": "import os\nimport argparse\nfrom itertools import count\nfrom ..autograd.utils import CodeTemplate, write\nfrom ..autograd.gen_autograd import load_aten_declarations\n\ntemplate_path = os.path.join(os.path.dirname(__file__), 'templates')\n\nATEN_DISPATCH_H = CodeTemplate.from_file(template_path + '/aten_dispatch.h')\nATEN_DISPATCH_CPP = CodeTemplate.from_file(template_path + '/aten_dispatch.cpp')\n\nATTR_METHOD_MAP = {\n 'int64_t': 'i',\n 'IntList': 'is',\n 'Scalar': 't',\n 'bool': 'i',\n 'double': 'f',\n 'std::array<bool,2>': 'is',\n 'std::array<bool,3>': 'is',\n}\n\nTYPE_CASTS = {\n 'std::array<bool,2>': 'as_bool_array<2>',\n 'std::array<bool,3>': 'as_bool_array<3>',\n 'Scalar': 'Scalar',\n 'IntList': 'std::vector<int64_t>',\n}\n\nATTR_ASSIGNMENT = CodeTemplate(\"\"\"\\\nauto ${name} = ${type_cast}(node->${method}(stringToSymbol(\"${name}\")));\\\n\"\"\")\n\nCALL_NAMESPACE = CodeTemplate(\"at::${name}(${args})\")\nCALL_METHOD = CodeTemplate(\"TensorTemporary(inputs[0]).value().${name}(${args})\")\n\nCONSTRUCTOR = CodeTemplate(\"\"\"\\\n{\"${descriptor}\", [](Node *node) {\n ${assignments}\n return TensorOp([=](const list_of_retainable & inputs,\n list_of_retainable & outputs) {\n autograd::profiler::RecordFunction record(\"${name}\");\n AutoGPU device_guard(deviceForInputs(inputs));\n pack_list(outputs, ${call});\n }, \"${name}\", ${num_inputs});\n}},\n\"\"\")\n\n\ndef is_jit_op(decl):\n return (not decl['api_name'].endswith('_') and\n not decl['name'].endswith('_out') and\n not decl['name'].endswith('_forward') and\n not any(arg['simple_type'] == 'Generator' for arg in decl['arguments']) and\n not any(arg['simple_type'] == 'SparseTensor' for arg in decl['arguments']) and\n not any(arg['simple_type'] == 'Storage' for arg in decl['arguments']) and\n any(arg['simple_type'] in {'Tensor', 'TensorList'} for arg in decl['arguments']) and\n 'Tensor' in decl['return_type'])\n\n\ndef gen_jit_dispatch(declarations, out):\n aten_decls = load_aten_declarations(declarations)\n jit_decls = [d for d in aten_decls if is_jit_op(d)]\n\n def is_tensor_arg(arg):\n return arg['simple_type'] in {'Tensor', 'TensorList'}\n\n ops = {}\n for decl in jit_decls:\n arguments = decl['arguments']\n name = decl['name']\n scalar_args = [arg for arg in arguments if not is_tensor_arg(arg)]\n has_tensorlist = any(arg['simple_type'] == 'TensorList' for arg in arguments)\n\n # Descriptor is a unique identified for a particular overload of an op\n attr_names = 
sorted([arg['name'] for arg in scalar_args])\n num_inputs = len(arguments) - len(scalar_args) if not has_tensorlist else \"*\"\n descriptor = '-'.join([decl['name'], str(num_inputs)] + attr_names)\n\n # All scalar args need to be assigned, so they can be captured by a lambda\n assignments = [ATTR_ASSIGNMENT.substitute(type=arg['simple_type'],\n type_cast=TYPE_CASTS.get(arg['simple_type'], arg['simple_type']),\n name=arg['name'],\n method=ATTR_METHOD_MAP[arg['simple_type']])\n for arg in scalar_args]\n\n # Generate the actuall ATen call. This gets a bit tricky because of\n # TensorList arguments, and functions that are only available as methods.\n if 'namespace' in decl['method_of']:\n if has_tensorlist:\n if sum(map(is_tensor_arg, arguments)) != 1:\n # TODO: support this\n continue\n args = ['TensorTemporaryList(inputs)' if is_tensor_arg(arg) else arg['name']\n for arg in arguments]\n else:\n tensor_id = iter(count(start=0))\n args = ['TensorTemporary(inputs[{}]).value()'.format(\n next(tensor_id)) if is_tensor_arg(arg) else arg['name']\n for arg in arguments]\n call = CALL_NAMESPACE.substitute(name=name, args=args)\n else:\n tensor_id = iter(count(start=1))\n args = ['TensorTemporary(inputs[{}]).value()'.format(next(tensor_id)) if is_tensor_arg(arg) else arg['name']\n for arg in arguments[1:]]\n call = CALL_METHOD.substitute(name=name, args=args)\n\n constructor = CONSTRUCTOR.substitute(descriptor=descriptor, name=name, call=call,\n assignments=assignments,\n # num_inputs is only used in AutogradClosure, which\n # is going to be removed soon anyway. There's no good value\n # we can provide for cat.\n num_inputs=num_inputs if num_inputs != \"*\" else 0)\n assert descriptor not in ops, descriptor\n ops[descriptor] = constructor\n\n # Sort the generated snippets to ensure that the generation is deterministic\n env = {'constructors': sorted(list(ops.values()))}\n write(out, 'aten_dispatch.h', ATEN_DISPATCH_H, env)\n write(out, 'aten_dispatch.cpp', ATEN_DISPATCH_CPP, env)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Generate JIT op dispatch')\n parser.add_argument('declarations', metavar='DECL',\n help='path to Declarations.yaml')\n parser.add_argument('out', metavar='OUT',\n help='path to output directory')\n args = parser.parse_args()\n gen_jit_dispatch(args.declarations, args.out)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/jit/gen_jit_dispatch.py"}], "after_files": [{"content": "import os\nimport argparse\nfrom itertools import count\nfrom ..autograd.utils import CodeTemplate, write\nfrom ..autograd.gen_autograd import load_aten_declarations\n\ntemplate_path = os.path.join(os.path.dirname(__file__), 'templates')\n\nATEN_DISPATCH_H = CodeTemplate.from_file(template_path + '/aten_dispatch.h')\nATEN_DISPATCH_CPP = CodeTemplate.from_file(template_path + '/aten_dispatch.cpp')\n\nATTR_METHOD_MAP = {\n 'int64_t': 'i',\n 'IntList': 'is',\n 'Scalar': 't',\n 'bool': 'i',\n 'double': 'f',\n 'std::array<bool,2>': 'is',\n 'std::array<bool,3>': 'is',\n}\n\nTYPE_CASTS = {\n 'std::array<bool,2>': 'as_bool_array<2>',\n 'std::array<bool,3>': 'as_bool_array<3>',\n 'Scalar': 'Scalar',\n 'IntList': 'std::vector<int64_t>',\n}\n\nATTR_ASSIGNMENT = CodeTemplate(\"\"\"\\\nauto ${name} = ${type_cast}(node->${method}(stringToSymbol(\"${name}\")));\\\n\"\"\")\n\nCALL_NAMESPACE = CodeTemplate(\"at::${name}(${args})\")\nCALL_METHOD = CodeTemplate(\"TensorTemporary(inputs[0]).value().${name}(${args})\")\n\nCONSTRUCTOR = CodeTemplate(\"\"\"\\\n{\"${descriptor}\", [](Node *node) {\n 
${assignments}\n return TensorOp([=](const list_of_retainable & inputs,\n list_of_retainable & outputs) {\n autograd::profiler::RecordFunction record(\"${name}\");\n AutoGPU device_guard(deviceForInputs(inputs));\n pack_list(outputs, ${call});\n }, \"${name}\", ${num_inputs});\n}},\n\"\"\")\n\n\ndef is_jit_op(decl):\n return (not decl['api_name'].endswith('_') and\n not decl['name'].endswith('_out') and\n not any(arg['simple_type'] == 'Generator' for arg in decl['arguments']) and\n not any(arg['simple_type'] == 'SparseTensor' for arg in decl['arguments']) and\n not any(arg['simple_type'] == 'Storage' for arg in decl['arguments']) and\n any(arg['simple_type'] in {'Tensor', 'TensorList'} for arg in decl['arguments']) and\n 'Tensor' in decl['return_type'])\n\n\ndef gen_jit_dispatch(declarations, out):\n aten_decls = load_aten_declarations(declarations)\n jit_decls = [d for d in aten_decls if is_jit_op(d)]\n\n def is_tensor_arg(arg):\n return arg['simple_type'] in {'Tensor', 'TensorList'}\n\n ops = {}\n for decl in jit_decls:\n arguments = decl['arguments']\n name = decl['name']\n scalar_args = [arg for arg in arguments if not is_tensor_arg(arg)]\n has_tensorlist = any(arg['simple_type'] == 'TensorList' for arg in arguments)\n\n # Descriptor is a unique identified for a particular overload of an op\n attr_names = sorted([arg['name'] for arg in scalar_args])\n num_inputs = len(arguments) - len(scalar_args) if not has_tensorlist else \"*\"\n descriptor = '-'.join([decl['name'], str(num_inputs)] + attr_names)\n\n # All scalar args need to be assigned, so they can be captured by a lambda\n assignments = [ATTR_ASSIGNMENT.substitute(type=arg['simple_type'],\n type_cast=TYPE_CASTS.get(arg['simple_type'], arg['simple_type']),\n name=arg['name'],\n method=ATTR_METHOD_MAP[arg['simple_type']])\n for arg in scalar_args]\n\n # Generate the actuall ATen call. This gets a bit tricky because of\n # TensorList arguments, and functions that are only available as methods.\n if 'namespace' in decl['method_of']:\n if has_tensorlist:\n if sum(map(is_tensor_arg, arguments)) != 1:\n # TODO: support this\n continue\n args = ['TensorTemporaryList(inputs)' if is_tensor_arg(arg) else arg['name']\n for arg in arguments]\n else:\n tensor_id = iter(count(start=0))\n args = ['TensorTemporary(inputs[{}]).value()'.format(\n next(tensor_id)) if is_tensor_arg(arg) else arg['name']\n for arg in arguments]\n call = CALL_NAMESPACE.substitute(name=name, args=args)\n else:\n tensor_id = iter(count(start=1))\n args = ['TensorTemporary(inputs[{}]).value()'.format(next(tensor_id)) if is_tensor_arg(arg) else arg['name']\n for arg in arguments[1:]]\n call = CALL_METHOD.substitute(name=name, args=args)\n\n constructor = CONSTRUCTOR.substitute(descriptor=descriptor, name=name, call=call,\n assignments=assignments,\n # num_inputs is only used in AutogradClosure, which\n # is going to be removed soon anyway. 
There's no good value\n # we can provide for cat.\n num_inputs=num_inputs if num_inputs != \"*\" else 0)\n assert descriptor not in ops, descriptor\n ops[descriptor] = constructor\n\n # Sort the generated snippets to ensure that the generation is deterministic\n env = {'constructors': sorted(list(ops.values()))}\n write(out, 'aten_dispatch.h', ATEN_DISPATCH_H, env)\n write(out, 'aten_dispatch.cpp', ATEN_DISPATCH_CPP, env)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Generate JIT op dispatch')\n parser.add_argument('declarations', metavar='DECL',\n help='path to Declarations.yaml')\n parser.add_argument('out', metavar='OUT',\n help='path to output directory')\n args = parser.parse_args()\n gen_jit_dispatch(args.declarations, args.out)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/jit/gen_jit_dispatch.py"}]} | 1,954 | 170 |
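For the PyTorch record above, the failing lookup key is easy to reconstruct: `gen_jit_dispatch.py` joins the op name, the count of tensor inputs, and the sorted scalar attribute names into a descriptor, and `is_jit_op` previously skipped every `*_forward` declaration, so no dispatch entry was ever generated for `thnn_batch_norm_forward`. The snippet below replays that string construction in plain Python; the argument list is an assumed batch-norm signature used only to show where the descriptor in the error message comes from.

```python
# Reproduce the descriptor construction from gen_jit_dispatch.py (no PyTorch needed).
def descriptor(name, arguments):
    is_tensor = lambda arg: arg['type'] in {'Tensor', 'TensorList'}
    scalar_args = [arg for arg in arguments if not is_tensor(arg)]
    attr_names = sorted(arg['name'] for arg in scalar_args)
    num_inputs = len(arguments) - len(scalar_args)
    return '-'.join([name, str(num_inputs)] + attr_names)

# Assumed signature: five tensor inputs plus three scalar attributes.
batch_norm_args = (
    [{'name': n, 'type': 'Tensor'}
     for n in ('input', 'weight', 'bias', 'running_mean', 'running_var')]
    + [{'name': 'training', 'type': 'bool'},
       {'name': 'momentum', 'type': 'double'},
       {'name': 'eps', 'type': 'double'}]
)
print(descriptor('thnn_batch_norm_forward', batch_norm_args))
# thnn_batch_norm_forward-5-eps-momentum-training  <- the key the JIT failed to find
```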
gh_patches_debug_36636 | rasdani/github-patches | git_diff | falconry__falcon-541 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Has compile_uri_template been removed?
I can't see it in the code any more.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/routing/util.py`
Content:
```
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from falcon import HTTP_METHODS, responders
16 from falcon.hooks import _wrap_with_hooks
17
18
19 def create_http_method_map(resource, before, after):
20 """Maps HTTP methods (e.g., 'GET', 'POST') to methods of a resource object.
21
22 Args:
23 resource: An object with *responder* methods, following the naming
24 convention *on_\**, that correspond to each method the resource
25 supports. For example, if a resource supports GET and POST, it
26 should define ``on_get(self, req, resp)`` and
27 ``on_post(self, req, resp)``.
28 before: An action hook or ``list`` of hooks to be called before each
29 *on_\** responder defined by the resource.
30 after: An action hook or ``list`` of hooks to be called after each
31 *on_\** responder defined by the resource.
32
33 Returns:
34 dict: A mapping of HTTP methods to responders.
35
36 """
37
38 method_map = {}
39
40 for method in HTTP_METHODS:
41 try:
42 responder = getattr(resource, 'on_' + method.lower())
43 except AttributeError:
44 # resource does not implement this method
45 pass
46 else:
47 # Usually expect a method, but any callable will do
48 if callable(responder):
49 responder = _wrap_with_hooks(
50 before, after, responder, resource)
51 method_map[method] = responder
52
53 # Attach a resource for unsupported HTTP methods
54 allowed_methods = sorted(list(method_map.keys()))
55
56 # NOTE(sebasmagri): We want the OPTIONS and 405 (Not Allowed) methods
57 # responders to be wrapped on global hooks
58 if 'OPTIONS' not in method_map:
59 # OPTIONS itself is intentionally excluded from the Allow header
60 responder = responders.create_default_options(
61 allowed_methods)
62 method_map['OPTIONS'] = _wrap_with_hooks(
63 before, after, responder, resource)
64 allowed_methods.append('OPTIONS')
65
66 na_responder = responders.create_method_not_allowed(allowed_methods)
67
68 for method in HTTP_METHODS:
69 if method not in allowed_methods:
70 method_map[method] = _wrap_with_hooks(
71 before, after, na_responder, resource)
72
73 return method_map
74
```
Path: `falcon/routing/__init__.py`
Content:
```
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from falcon.routing.compiled import CompiledRouter
16 from falcon.routing.util import create_http_method_map # NOQA
17
18
19 DefaultRouter = CompiledRouter
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/routing/__init__.py b/falcon/routing/__init__.py
--- a/falcon/routing/__init__.py
+++ b/falcon/routing/__init__.py
@@ -14,6 +14,7 @@
from falcon.routing.compiled import CompiledRouter
from falcon.routing.util import create_http_method_map # NOQA
+from falcon.routing.util import compile_uri_template # NOQA
DefaultRouter = CompiledRouter
diff --git a/falcon/routing/util.py b/falcon/routing/util.py
--- a/falcon/routing/util.py
+++ b/falcon/routing/util.py
@@ -12,10 +12,72 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import re
+
+import six
+
from falcon import HTTP_METHODS, responders
from falcon.hooks import _wrap_with_hooks
+# NOTE(kgriffs): Published method; take care to avoid breaking changes.
+def compile_uri_template(template):
+ """Compile the given URI template string into a pattern matcher.
+
+ This function can be used to construct custom routing engines that
+ iterate through a list of possible routes, attempting to match
+ an incoming request against each route's compiled regular expression.
+
+ Each field is converted to a named group, so that when a match
+ is found, the fields can be easily extracted using
+ :py:meth:`re.MatchObject.groupdict`.
+
+ This function does not support the more flexible templating
+ syntax used in the default router. Only simple paths with bracketed
+ field expressions are recognized. For example::
+
+ /
+ /books
+ /books/{isbn}
+ /books/{isbn}/characters
+ /books/{isbn}/characters/{name}
+
+ Also, note that if the template contains a trailing slash character,
+ it will be stripped in order to normalize the routing logic.
+
+ Args:
+ template(str): The template to compile. Note that field names are
+ restricted to ASCII a-z, A-Z, and the underscore character.
+
+ Returns:
+ tuple: (template_field_names, template_regex)
+ """
+
+ if not isinstance(template, six.string_types):
+ raise TypeError('uri_template is not a string')
+
+ if not template.startswith('/'):
+ raise ValueError("uri_template must start with '/'")
+
+ if '//' in template:
+ raise ValueError("uri_template may not contain '//'")
+
+ if template != '/' and template.endswith('/'):
+ template = template[:-1]
+
+ expression_pattern = r'{([a-zA-Z][a-zA-Z_]*)}'
+
+ # Get a list of field names
+ fields = set(re.findall(expression_pattern, template))
+
+ # Convert Level 1 var patterns to equivalent named regex groups
+ escaped = re.sub(r'[\.\(\)\[\]\?\*\+\^\|]', r'\\\g<0>', template)
+ pattern = re.sub(expression_pattern, r'(?P<\1>[^/]+)', escaped)
+ pattern = r'\A' + pattern + r'\Z'
+
+ return fields, re.compile(pattern, re.IGNORECASE)
+
+
def create_http_method_map(resource, before, after):
"""Maps HTTP methods (e.g., 'GET', 'POST') to methods of a resource object.
| {"golden_diff": "diff --git a/falcon/routing/__init__.py b/falcon/routing/__init__.py\n--- a/falcon/routing/__init__.py\n+++ b/falcon/routing/__init__.py\n@@ -14,6 +14,7 @@\n \n from falcon.routing.compiled import CompiledRouter\n from falcon.routing.util import create_http_method_map # NOQA\n+from falcon.routing.util import compile_uri_template # NOQA\n \n \n DefaultRouter = CompiledRouter\ndiff --git a/falcon/routing/util.py b/falcon/routing/util.py\n--- a/falcon/routing/util.py\n+++ b/falcon/routing/util.py\n@@ -12,10 +12,72 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import re\n+\n+import six\n+\n from falcon import HTTP_METHODS, responders\n from falcon.hooks import _wrap_with_hooks\n \n \n+# NOTE(kgriffs): Published method; take care to avoid breaking changes.\n+def compile_uri_template(template):\n+ \"\"\"Compile the given URI template string into a pattern matcher.\n+\n+ This function can be used to construct custom routing engines that\n+ iterate through a list of possible routes, attempting to match\n+ an incoming request against each route's compiled regular expression.\n+\n+ Each field is converted to a named group, so that when a match\n+ is found, the fields can be easily extracted using\n+ :py:meth:`re.MatchObject.groupdict`.\n+\n+ This function does not support the more flexible templating\n+ syntax used in the default router. Only simple paths with bracketed\n+ field expressions are recognized. For example::\n+\n+ /\n+ /books\n+ /books/{isbn}\n+ /books/{isbn}/characters\n+ /books/{isbn}/characters/{name}\n+\n+ Also, note that if the template contains a trailing slash character,\n+ it will be stripped in order to normalize the routing logic.\n+\n+ Args:\n+ template(str): The template to compile. 
Note that field names are\n+ restricted to ASCII a-z, A-Z, and the underscore character.\n+\n+ Returns:\n+ tuple: (template_field_names, template_regex)\n+ \"\"\"\n+\n+ if not isinstance(template, six.string_types):\n+ raise TypeError('uri_template is not a string')\n+\n+ if not template.startswith('/'):\n+ raise ValueError(\"uri_template must start with '/'\")\n+\n+ if '//' in template:\n+ raise ValueError(\"uri_template may not contain '//'\")\n+\n+ if template != '/' and template.endswith('/'):\n+ template = template[:-1]\n+\n+ expression_pattern = r'{([a-zA-Z][a-zA-Z_]*)}'\n+\n+ # Get a list of field names\n+ fields = set(re.findall(expression_pattern, template))\n+\n+ # Convert Level 1 var patterns to equivalent named regex groups\n+ escaped = re.sub(r'[\\.\\(\\)\\[\\]\\?\\*\\+\\^\\|]', r'\\\\\\g<0>', template)\n+ pattern = re.sub(expression_pattern, r'(?P<\\1>[^/]+)', escaped)\n+ pattern = r'\\A' + pattern + r'\\Z'\n+\n+ return fields, re.compile(pattern, re.IGNORECASE)\n+\n+\n def create_http_method_map(resource, before, after):\n \"\"\"Maps HTTP methods (e.g., 'GET', 'POST') to methods of a resource object.\n", "issue": "Has compile_uri_template been removed?\nI can't see it in the code any more.\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom falcon import HTTP_METHODS, responders\nfrom falcon.hooks import _wrap_with_hooks\n\n\ndef create_http_method_map(resource, before, after):\n \"\"\"Maps HTTP methods (e.g., 'GET', 'POST') to methods of a resource object.\n\n Args:\n resource: An object with *responder* methods, following the naming\n convention *on_\\**, that correspond to each method the resource\n supports. 
For example, if a resource supports GET and POST, it\n should define ``on_get(self, req, resp)`` and\n ``on_post(self, req, resp)``.\n before: An action hook or ``list`` of hooks to be called before each\n *on_\\** responder defined by the resource.\n after: An action hook or ``list`` of hooks to be called after each\n *on_\\** responder defined by the resource.\n\n Returns:\n dict: A mapping of HTTP methods to responders.\n\n \"\"\"\n\n method_map = {}\n\n for method in HTTP_METHODS:\n try:\n responder = getattr(resource, 'on_' + method.lower())\n except AttributeError:\n # resource does not implement this method\n pass\n else:\n # Usually expect a method, but any callable will do\n if callable(responder):\n responder = _wrap_with_hooks(\n before, after, responder, resource)\n method_map[method] = responder\n\n # Attach a resource for unsupported HTTP methods\n allowed_methods = sorted(list(method_map.keys()))\n\n # NOTE(sebasmagri): We want the OPTIONS and 405 (Not Allowed) methods\n # responders to be wrapped on global hooks\n if 'OPTIONS' not in method_map:\n # OPTIONS itself is intentionally excluded from the Allow header\n responder = responders.create_default_options(\n allowed_methods)\n method_map['OPTIONS'] = _wrap_with_hooks(\n before, after, responder, resource)\n allowed_methods.append('OPTIONS')\n\n na_responder = responders.create_method_not_allowed(allowed_methods)\n\n for method in HTTP_METHODS:\n if method not in allowed_methods:\n method_map[method] = _wrap_with_hooks(\n before, after, na_responder, resource)\n\n return method_map\n", "path": "falcon/routing/util.py"}, {"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom falcon.routing.compiled import CompiledRouter\nfrom falcon.routing.util import create_http_method_map # NOQA\n\n\nDefaultRouter = CompiledRouter\n", "path": "falcon/routing/__init__.py"}], "after_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nimport six\n\nfrom falcon import HTTP_METHODS, responders\nfrom falcon.hooks import _wrap_with_hooks\n\n\n# NOTE(kgriffs): Published method; take care to avoid breaking changes.\ndef compile_uri_template(template):\n \"\"\"Compile the given URI template string into a pattern matcher.\n\n This function can be used to construct custom routing engines that\n iterate through a list of possible routes, attempting to match\n an incoming request against each route's compiled regular 
expression.\n\n Each field is converted to a named group, so that when a match\n is found, the fields can be easily extracted using\n :py:meth:`re.MatchObject.groupdict`.\n\n This function does not support the more flexible templating\n syntax used in the default router. Only simple paths with bracketed\n field expressions are recognized. For example::\n\n /\n /books\n /books/{isbn}\n /books/{isbn}/characters\n /books/{isbn}/characters/{name}\n\n Also, note that if the template contains a trailing slash character,\n it will be stripped in order to normalize the routing logic.\n\n Args:\n template(str): The template to compile. Note that field names are\n restricted to ASCII a-z, A-Z, and the underscore character.\n\n Returns:\n tuple: (template_field_names, template_regex)\n \"\"\"\n\n if not isinstance(template, six.string_types):\n raise TypeError('uri_template is not a string')\n\n if not template.startswith('/'):\n raise ValueError(\"uri_template must start with '/'\")\n\n if '//' in template:\n raise ValueError(\"uri_template may not contain '//'\")\n\n if template != '/' and template.endswith('/'):\n template = template[:-1]\n\n expression_pattern = r'{([a-zA-Z][a-zA-Z_]*)}'\n\n # Get a list of field names\n fields = set(re.findall(expression_pattern, template))\n\n # Convert Level 1 var patterns to equivalent named regex groups\n escaped = re.sub(r'[\\.\\(\\)\\[\\]\\?\\*\\+\\^\\|]', r'\\\\\\g<0>', template)\n pattern = re.sub(expression_pattern, r'(?P<\\1>[^/]+)', escaped)\n pattern = r'\\A' + pattern + r'\\Z'\n\n return fields, re.compile(pattern, re.IGNORECASE)\n\n\ndef create_http_method_map(resource, before, after):\n \"\"\"Maps HTTP methods (e.g., 'GET', 'POST') to methods of a resource object.\n\n Args:\n resource: An object with *responder* methods, following the naming\n convention *on_\\**, that correspond to each method the resource\n supports. 
For example, if a resource supports GET and POST, it\n should define ``on_get(self, req, resp)`` and\n ``on_post(self, req, resp)``.\n before: An action hook or ``list`` of hooks to be called before each\n *on_\\** responder defined by the resource.\n after: An action hook or ``list`` of hooks to be called after each\n *on_\\** responder defined by the resource.\n\n Returns:\n dict: A mapping of HTTP methods to responders.\n\n \"\"\"\n\n method_map = {}\n\n for method in HTTP_METHODS:\n try:\n responder = getattr(resource, 'on_' + method.lower())\n except AttributeError:\n # resource does not implement this method\n pass\n else:\n # Usually expect a method, but any callable will do\n if callable(responder):\n responder = _wrap_with_hooks(\n before, after, responder, resource)\n method_map[method] = responder\n\n # Attach a resource for unsupported HTTP methods\n allowed_methods = sorted(list(method_map.keys()))\n\n # NOTE(sebasmagri): We want the OPTIONS and 405 (Not Allowed) methods\n # responders to be wrapped on global hooks\n if 'OPTIONS' not in method_map:\n # OPTIONS itself is intentionally excluded from the Allow header\n responder = responders.create_default_options(\n allowed_methods)\n method_map['OPTIONS'] = _wrap_with_hooks(\n before, after, responder, resource)\n allowed_methods.append('OPTIONS')\n\n na_responder = responders.create_method_not_allowed(allowed_methods)\n\n for method in HTTP_METHODS:\n if method not in allowed_methods:\n method_map[method] = _wrap_with_hooks(\n before, after, na_responder, resource)\n\n return method_map\n", "path": "falcon/routing/util.py"}, {"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom falcon.routing.compiled import CompiledRouter\nfrom falcon.routing.util import create_http_method_map # NOQA\nfrom falcon.routing.util import compile_uri_template # NOQA\n\n\nDefaultRouter = CompiledRouter\n", "path": "falcon/routing/__init__.py"}]} | 1,253 | 752 |
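The falcon record above restores `compile_uri_template` and re-exports it from `falcon.routing`. Its two regex passes are small enough to check in isolation; the sketch below repeats them with only the standard library (dropping the `six` type check), so the behaviour shown is that of the restored helper rather than a new API.

```python
import re

def compile_uri_template(template):
    # Same Level-1 URI-template handling as the restored falcon helper.
    if template != '/' and template.endswith('/'):
        template = template[:-1]
    expression_pattern = r'{([a-zA-Z][a-zA-Z_]*)}'
    fields = set(re.findall(expression_pattern, template))
    escaped = re.sub(r'[\.\(\)\[\]\?\*\+\^\|]', r'\\\g<0>', template)
    pattern = re.sub(expression_pattern, r'(?P<\1>[^/]+)', escaped)
    return fields, re.compile(r'\A' + pattern + r'\Z', re.IGNORECASE)

fields, matcher = compile_uri_template('/books/{isbn}/characters/{name}')
print(fields)                                  # {'isbn', 'name'} (a set, order varies)
match = matcher.match('/books/0765350386/characters/kellhus')
print(match.groupdict())                       # {'isbn': '0765350386', 'name': 'kellhus'}
```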
gh_patches_debug_9545 | rasdani/github-patches | git_diff | fossasia__open-event-server-4310 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add email to valid types in custom-form
**Current**
Currently we are not able to set an email type to the custom-form which leads to `Error: 422`.
**Expected**
email should be a valid type for the custom-form
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/custom_forms.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2 from marshmallow_jsonapi.flask import Schema, Relationship
3 from marshmallow_jsonapi import fields
4 import marshmallow.validate as validate
5 from app.api.helpers.permissions import jwt_required
6 from flask_rest_jsonapi.exceptions import ObjectNotFound
7
8 from app.api.bootstrap import api
9 from app.api.helpers.utilities import dasherize
10 from app.models import db
11 from app.models.custom_form import CustomForms
12 from app.models.event import Event
13 from app.api.helpers.db import safe_query
14 from app.api.helpers.utilities import require_relationship
15 from app.api.helpers.permission_manager import has_access
16 from app.api.helpers.query import event_query
17
18
19 class CustomFormSchema(Schema):
20 """
21 API Schema for Custom Forms database model
22 """
23 class Meta:
24 """
25 Meta class for CustomForm Schema
26 """
27 type_ = 'custom-form'
28 self_view = 'v1.custom_form_detail'
29 self_view_kwargs = {'id': '<id>'}
30 inflect = dasherize
31
32 id = fields.Integer(dump_only=True)
33 field_identifier = fields.Str(required=True)
34 form = fields.Str(required=True)
35 type = fields.Str(default="text", validate=validate.OneOf(
36 choices=["text", "checkbox", "select", "file", "image"]))
37 is_required = fields.Boolean(default=False)
38 is_included = fields.Boolean(default=False)
39 is_fixed = fields.Boolean(default=False)
40 event = Relationship(attribute='event',
41 self_view='v1.custom_form_event',
42 self_view_kwargs={'id': '<id>'},
43 related_view='v1.event_detail',
44 related_view_kwargs={'custom_form_id': '<id>'},
45 schema='EventSchema',
46 type_='event')
47
48
49 class CustomFormListPost(ResourceList):
50 """
51 Create and List Custom Forms
52 """
53
54 def before_post(self, args, kwargs, data):
55 """
56 method to check for required relationship with event
57 :param args:
58 :param kwargs:
59 :param data:
60 :return:
61 """
62 require_relationship(['event'], data)
63 if not has_access('is_coorganizer', event_id=data['event']):
64 raise ObjectNotFound({'parameter': 'event_id'},
65 "Event: {} not found".format(data['event_id']))
66
67 schema = CustomFormSchema
68 methods = ['POST', ]
69 data_layer = {'session': db.session,
70 'model': CustomForms
71 }
72
73
74 class CustomFormList(ResourceList):
75 """
76 Create and List Custom Forms
77 """
78 def query(self, view_kwargs):
79 """
80 query method for different view_kwargs
81 :param view_kwargs:
82 :return:
83 """
84 query_ = self.session.query(CustomForms)
85 query_ = event_query(self, query_, view_kwargs)
86 return query_
87
88 view_kwargs = True
89 decorators = (jwt_required, )
90 methods = ['GET', ]
91 schema = CustomFormSchema
92 data_layer = {'session': db.session,
93 'model': CustomForms,
94 'methods': {
95 'query': query
96 }}
97
98
99 class CustomFormDetail(ResourceDetail):
100 """
101 CustomForm Resource
102 """
103
104 def before_get_object(self, view_kwargs):
105 """
106 before get method
107 :param view_kwargs:
108 :return:
109 """
110 event = None
111 if view_kwargs.get('event_id'):
112 event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')
113 elif view_kwargs.get('event_identifier'):
114 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')
115
116 if event:
117 custom_form = safe_query(self, CustomForms, 'event_id', event.id, 'event_id')
118 view_kwargs['id'] = custom_form.id
119
120 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
121 fetch_as="event_id", model=CustomForms, methods="PATCH,DELETE"), )
122 schema = CustomFormSchema
123 data_layer = {'session': db.session,
124 'model': CustomForms}
125
126
127 class CustomFormRelationshipRequired(ResourceRelationship):
128 """
129 CustomForm Relationship (Required)
130 """
131 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
132 fetch_as="event_id", model=CustomForms, methods="PATCH"),)
133 methods = ['GET', 'PATCH']
134 schema = CustomFormSchema
135 data_layer = {'session': db.session,
136 'model': CustomForms}
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/custom_forms.py b/app/api/custom_forms.py
--- a/app/api/custom_forms.py
+++ b/app/api/custom_forms.py
@@ -33,7 +33,7 @@
field_identifier = fields.Str(required=True)
form = fields.Str(required=True)
type = fields.Str(default="text", validate=validate.OneOf(
- choices=["text", "checkbox", "select", "file", "image"]))
+ choices=["text", "checkbox", "select", "file", "image", "email"]))
is_required = fields.Boolean(default=False)
is_included = fields.Boolean(default=False)
is_fixed = fields.Boolean(default=False)
| {"golden_diff": "diff --git a/app/api/custom_forms.py b/app/api/custom_forms.py\n--- a/app/api/custom_forms.py\n+++ b/app/api/custom_forms.py\n@@ -33,7 +33,7 @@\n field_identifier = fields.Str(required=True)\n form = fields.Str(required=True)\n type = fields.Str(default=\"text\", validate=validate.OneOf(\n- choices=[\"text\", \"checkbox\", \"select\", \"file\", \"image\"]))\n+ choices=[\"text\", \"checkbox\", \"select\", \"file\", \"image\", \"email\"]))\n is_required = fields.Boolean(default=False)\n is_included = fields.Boolean(default=False)\n is_fixed = fields.Boolean(default=False)\n", "issue": "Add email to valid types in custom-form\n**Current**\r\nCurrently we are not able to set an email type to the custom-form which leads to `Error: 422`.\r\n\r\n**Expected**\r\nemail should be a valid type for the custom-form\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\nimport marshmallow.validate as validate\nfrom app.api.helpers.permissions import jwt_required\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.utilities import dasherize\nfrom app.models import db\nfrom app.models.custom_form import CustomForms\nfrom app.models.event import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\n\n\nclass CustomFormSchema(Schema):\n \"\"\"\n API Schema for Custom Forms database model\n \"\"\"\n class Meta:\n \"\"\"\n Meta class for CustomForm Schema\n \"\"\"\n type_ = 'custom-form'\n self_view = 'v1.custom_form_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Integer(dump_only=True)\n field_identifier = fields.Str(required=True)\n form = fields.Str(required=True)\n type = fields.Str(default=\"text\", validate=validate.OneOf(\n choices=[\"text\", \"checkbox\", \"select\", \"file\", \"image\"]))\n is_required = fields.Boolean(default=False)\n is_included = fields.Boolean(default=False)\n is_fixed = fields.Boolean(default=False)\n event = Relationship(attribute='event',\n self_view='v1.custom_form_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'custom_form_id': '<id>'},\n schema='EventSchema',\n type_='event')\n\n\nclass CustomFormListPost(ResourceList):\n \"\"\"\n Create and List Custom Forms\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n method to check for required relationship with event\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event_id']))\n\n schema = CustomFormSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': CustomForms\n }\n\n\nclass CustomFormList(ResourceList):\n \"\"\"\n Create and List Custom Forms\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n query method for different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(CustomForms)\n query_ = event_query(self, query_, view_kwargs)\n return query_\n\n view_kwargs = True\n decorators = (jwt_required, )\n methods = ['GET', ]\n schema = CustomFormSchema\n 
data_layer = {'session': db.session,\n 'model': CustomForms,\n 'methods': {\n 'query': query\n }}\n\n\nclass CustomFormDetail(ResourceDetail):\n \"\"\"\n CustomForm Resource\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method\n :param view_kwargs:\n :return:\n \"\"\"\n event = None\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n\n if event:\n custom_form = safe_query(self, CustomForms, 'event_id', event.id, 'event_id')\n view_kwargs['id'] = custom_form.id\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=CustomForms, methods=\"PATCH,DELETE\"), )\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms}\n\n\nclass CustomFormRelationshipRequired(ResourceRelationship):\n \"\"\"\n CustomForm Relationship (Required)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=CustomForms, methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms}\n", "path": "app/api/custom_forms.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\nimport marshmallow.validate as validate\nfrom app.api.helpers.permissions import jwt_required\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.utilities import dasherize\nfrom app.models import db\nfrom app.models.custom_form import CustomForms\nfrom app.models.event import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\n\n\nclass CustomFormSchema(Schema):\n \"\"\"\n API Schema for Custom Forms database model\n \"\"\"\n class Meta:\n \"\"\"\n Meta class for CustomForm Schema\n \"\"\"\n type_ = 'custom-form'\n self_view = 'v1.custom_form_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Integer(dump_only=True)\n field_identifier = fields.Str(required=True)\n form = fields.Str(required=True)\n type = fields.Str(default=\"text\", validate=validate.OneOf(\n choices=[\"text\", \"checkbox\", \"select\", \"file\", \"image\", \"email\"]))\n is_required = fields.Boolean(default=False)\n is_included = fields.Boolean(default=False)\n is_fixed = fields.Boolean(default=False)\n event = Relationship(attribute='event',\n self_view='v1.custom_form_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'custom_form_id': '<id>'},\n schema='EventSchema',\n type_='event')\n\n\nclass CustomFormListPost(ResourceList):\n \"\"\"\n Create and List Custom Forms\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n method to check for required relationship with event\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event_id']))\n\n schema = 
CustomFormSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': CustomForms\n }\n\n\nclass CustomFormList(ResourceList):\n \"\"\"\n Create and List Custom Forms\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n query method for different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(CustomForms)\n query_ = event_query(self, query_, view_kwargs)\n return query_\n\n view_kwargs = True\n decorators = (jwt_required, )\n methods = ['GET', ]\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms,\n 'methods': {\n 'query': query\n }}\n\n\nclass CustomFormDetail(ResourceDetail):\n \"\"\"\n CustomForm Resource\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method\n :param view_kwargs:\n :return:\n \"\"\"\n event = None\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n\n if event:\n custom_form = safe_query(self, CustomForms, 'event_id', event.id, 'event_id')\n view_kwargs['id'] = custom_form.id\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=CustomForms, methods=\"PATCH,DELETE\"), )\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms}\n\n\nclass CustomFormRelationshipRequired(ResourceRelationship):\n \"\"\"\n CustomForm Relationship (Required)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=CustomForms, methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms}\n", "path": "app/api/custom_forms.py"}]} | 1,571 | 143 |
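The open-event-server record above is a one-token change, but the mechanism behind the 422 is simply marshmallow's `OneOf` validator rejecting any `type` value outside the listed choices. Below is a minimal sketch of that behaviour, assuming marshmallow 3.x `load`/`ValidationError` semantics and using an invented schema name rather than the real `CustomFormSchema`:

```python
from marshmallow import Schema, ValidationError, fields, validate

class FormFieldSchema(Schema):
    # Mirrors the patched 'type' field; "email" is the newly accepted choice.
    type = fields.Str(validate=validate.OneOf(
        choices=["text", "checkbox", "select", "file", "image", "email"]))

schema = FormFieldSchema()
print(schema.load({"type": "email"}))     # {'type': 'email'} -- accepted after the patch

try:
    schema.load({"type": "phone"})        # not in choices -> the old 422 path
except ValidationError as err:
    print(err.messages)                   # {'type': ['Must be one of: ...']}
```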
gh_patches_debug_32728 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-2596 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Easier customisation of loadpaths
I have a case where I would like to import a large Shape file, but filter it on certain attributes of each feature. To avoid having to rewrite my own complete `loadpaths` command, it would be convenient to move the object filtering into a method of the command. The proposed patch is on its way...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/core/management/commands/loadpaths.py`
Content:
```
1 from django.contrib.gis.gdal import DataSource, GDALException
2 from geotrek.core.models import Path
3 from geotrek.authent.models import Structure
4 from django.contrib.gis.geos.collections import Polygon, LineString
5 from django.core.management.base import BaseCommand, CommandError
6 from django.conf import settings
7 from django.db.utils import IntegrityError, InternalError
8 from django.db import transaction
9
10
11 class Command(BaseCommand):
12 help = 'Load Paths from a file within the spatial extent\n'
13
14 def add_arguments(self, parser):
15 parser.add_argument('file_path', help="File's path of the paths")
16 parser.add_argument('--structure', action='store', dest='structure', help="Define the structure")
17 parser.add_argument('--name-attribute', '-n', action='store', dest='name', default='nom',
18 help="Name of the name's attribute inside the file")
19 parser.add_argument('--comments-attribute', '-c', nargs='*', action='store', dest='comment',
20 help="")
21 parser.add_argument('--encoding', '-e', action='store', dest='encoding', default='utf-8',
22 help='File encoding, default utf-8')
23 parser.add_argument('--srid', '-s', action='store', dest='srid', default=4326, type=int,
24 help="File's SRID")
25 parser.add_argument('--intersect', '-i', action='store_true', dest='intersect', default=False,
26 help="Check paths intersect spatial extent and not only within")
27 parser.add_argument('--fail', '-f', action='store_true', dest='fail', default=False,
28 help="Allows to grant fails")
29 parser.add_argument('--dry', '-d', action='store_true', dest='dry', default=False,
30 help="Do not change the database, dry run. Show the number of fail"
31 " and objects potentially created")
32
33 def handle(self, *args, **options):
34 verbosity = options.get('verbosity')
35 encoding = options.get('encoding')
36 file_path = options.get('file_path')
37 structure = options.get('structure')
38 name_column = options.get('name')
39 srid = options.get('srid')
40 do_intersect = options.get('intersect')
41 comments_columns = options.get('comment')
42 fail = options.get('fail')
43 dry = options.get('dry')
44
45 if dry:
46 fail = True
47
48 counter = 0
49 counter_fail = 0
50
51 if structure:
52 try:
53 structure = Structure.objects.get(name=structure)
54 except Structure.DoesNotExist:
55 raise CommandError("Structure does not match with instance's structures\n"
56 "Change your option --structure")
57 elif Structure.objects.count() == 1:
58 structure = Structure.objects.first()
59 else:
60 raise CommandError("There are more than 1 structure and you didn't define the option structure\n"
61 "Use --structure to define it")
62 if verbosity > 0:
63 self.stdout.write("All paths in DataSource will be linked to the structure : %s" % structure)
64
65 ds = DataSource(file_path, encoding=encoding)
66
67 bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT)
68 bbox.srid = settings.SRID
69
70 sid = transaction.savepoint()
71
72 for layer in ds:
73 for feat in layer:
74 name = feat.get(name_column) if name_column in layer.fields else ''
75 comment_final_tab = []
76 if comments_columns:
77 for comment_column in comments_columns:
78 if comment_column in layer.fields:
79 comment_final_tab.append(feat.get(comment_column))
80 geom = feat.geom.geos
81 if not isinstance(geom, LineString):
82 if verbosity > 0:
83 self.stdout.write("%s's geometry is not a Linestring" % feat)
84 break
85 self.check_srid(srid, geom)
86 geom.dim = 2
87 if do_intersect and bbox.intersects(geom) or not do_intersect and geom.within(bbox):
88 try:
89 with transaction.atomic():
90 comment_final = '</br>'.join(comment_final_tab)
91 path = Path.objects.create(name=name,
92 structure=structure,
93 geom=geom,
94 comments=comment_final)
95 counter += 1
96 if verbosity > 0:
97 self.stdout.write('Create path with pk : {}'.format(path.pk))
98 if verbosity > 1:
99 self.stdout.write("The comment %s was added on %s" % (comment_final, name))
100 except (IntegrityError, InternalError):
101 if fail:
102 counter_fail += 1
103 self.stdout.write('Integrity Error on path : {}, {}'.format(name, geom))
104 else:
105 raise
106 if not dry:
107 transaction.savepoint_commit(sid)
108 if verbosity >= 2:
109 self.stdout.write(self.style.NOTICE(
110 "{0} objects created, {1} objects failed".format(counter, counter_fail)))
111 else:
112 transaction.savepoint_rollback(sid)
113 self.stdout.write(self.style.NOTICE(
114 "{0} objects will be create, {1} objects failed;".format(counter, counter_fail)))
115
116 def check_srid(self, srid, geom):
117 if not geom.srid:
118 geom.srid = srid
119 if geom.srid != settings.SRID:
120 try:
121 geom.transform(settings.SRID)
122 except GDALException:
123 raise CommandError("SRID is not well configurate, change/add option srid")
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geotrek/core/management/commands/loadpaths.py b/geotrek/core/management/commands/loadpaths.py
--- a/geotrek/core/management/commands/loadpaths.py
+++ b/geotrek/core/management/commands/loadpaths.py
@@ -37,7 +37,7 @@
structure = options.get('structure')
name_column = options.get('name')
srid = options.get('srid')
- do_intersect = options.get('intersect')
+ self.do_intersect = options.get('intersect')
comments_columns = options.get('comment')
fail = options.get('fail')
dry = options.get('dry')
@@ -64,8 +64,8 @@
ds = DataSource(file_path, encoding=encoding)
- bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT)
- bbox.srid = settings.SRID
+ self.bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT)
+ self.bbox.srid = settings.SRID
sid = transaction.savepoint()
@@ -84,7 +84,7 @@
break
self.check_srid(srid, geom)
geom.dim = 2
- if do_intersect and bbox.intersects(geom) or not do_intersect and geom.within(bbox):
+ if self.should_import(feat, geom):
try:
with transaction.atomic():
comment_final = '</br>'.join(comment_final_tab)
@@ -121,3 +121,9 @@
geom.transform(settings.SRID)
except GDALException:
raise CommandError("SRID is not well configurate, change/add option srid")
+
+ def should_import(self, feature, geom):
+ return (
+ self.do_intersect and self.bbox.intersects(geom)
+ or not self.do_intersect and geom.within(self.bbox)
+ )
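For illustration only: with the `should_import()` hook introduced by the patch above, a downstream project could filter features by attribute in a small subclass instead of rewriting the whole command. The command-shadowing location and the `valide`/`oui` attribute values below are assumptions for the sketch, not part of Geotrek-admin:

```python
# Hypothetical override living in one of your own apps at
# <yourapp>/management/commands/loadpaths.py so it shadows the built-in
# command; the "valide" shapefile attribute is an assumed example field.
from geotrek.core.management.commands.loadpaths import Command as BaseLoadPaths


class Command(BaseLoadPaths):
    def should_import(self, feature, geom):
        # Keep the parent's spatial-extent check (within / intersect).
        if not super().should_import(feature, geom):
            return False
        # Additionally skip features whose assumed "valide" attribute
        # is not set to "oui".
        return str(feature.get("valide")).lower() == "oui"
```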
| {"golden_diff": "diff --git a/geotrek/core/management/commands/loadpaths.py b/geotrek/core/management/commands/loadpaths.py\n--- a/geotrek/core/management/commands/loadpaths.py\n+++ b/geotrek/core/management/commands/loadpaths.py\n@@ -37,7 +37,7 @@\n structure = options.get('structure')\n name_column = options.get('name')\n srid = options.get('srid')\n- do_intersect = options.get('intersect')\n+ self.do_intersect = options.get('intersect')\n comments_columns = options.get('comment')\n fail = options.get('fail')\n dry = options.get('dry')\n@@ -64,8 +64,8 @@\n \n ds = DataSource(file_path, encoding=encoding)\n \n- bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT)\n- bbox.srid = settings.SRID\n+ self.bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT)\n+ self.bbox.srid = settings.SRID\n \n sid = transaction.savepoint()\n \n@@ -84,7 +84,7 @@\n break\n self.check_srid(srid, geom)\n geom.dim = 2\n- if do_intersect and bbox.intersects(geom) or not do_intersect and geom.within(bbox):\n+ if self.should_import(feat, geom):\n try:\n with transaction.atomic():\n comment_final = '</br>'.join(comment_final_tab)\n@@ -121,3 +121,9 @@\n geom.transform(settings.SRID)\n except GDALException:\n raise CommandError(\"SRID is not well configurate, change/add option srid\")\n+\n+ def should_import(self, feature, geom):\n+ return (\n+ self.do_intersect and self.bbox.intersects(geom)\n+ or not self.do_intersect and geom.within(self.bbox)\n+ )\n", "issue": "Personnalisation plus facile de loadpaths\nJ'ai un cas o\u00f9 j'aimerais importer un gros fichier Shape, mais o\u00f9 j'aimerais filtrer selon certains attributs de chaque \u00e9l\u00e9ment. Pour \u00e9viter de devoir r\u00e9\u00e9crire ma propre command `loadpaths` compl\u00e8te, il serait pratique de d\u00e9porter le filtrage des objets dans une m\u00e9thode de la commande. Le patch propos\u00e9 arrive...\n", "before_files": [{"content": "from django.contrib.gis.gdal import DataSource, GDALException\nfrom geotrek.core.models import Path\nfrom geotrek.authent.models import Structure\nfrom django.contrib.gis.geos.collections import Polygon, LineString\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom django.db.utils import IntegrityError, InternalError\nfrom django.db import transaction\n\n\nclass Command(BaseCommand):\n help = 'Load Paths from a file within the spatial extent\\n'\n\n def add_arguments(self, parser):\n parser.add_argument('file_path', help=\"File's path of the paths\")\n parser.add_argument('--structure', action='store', dest='structure', help=\"Define the structure\")\n parser.add_argument('--name-attribute', '-n', action='store', dest='name', default='nom',\n help=\"Name of the name's attribute inside the file\")\n parser.add_argument('--comments-attribute', '-c', nargs='*', action='store', dest='comment',\n help=\"\")\n parser.add_argument('--encoding', '-e', action='store', dest='encoding', default='utf-8',\n help='File encoding, default utf-8')\n parser.add_argument('--srid', '-s', action='store', dest='srid', default=4326, type=int,\n help=\"File's SRID\")\n parser.add_argument('--intersect', '-i', action='store_true', dest='intersect', default=False,\n help=\"Check paths intersect spatial extent and not only within\")\n parser.add_argument('--fail', '-f', action='store_true', dest='fail', default=False,\n help=\"Allows to grant fails\")\n parser.add_argument('--dry', '-d', action='store_true', dest='dry', default=False,\n help=\"Do not change the database, dry run. 
Show the number of fail\"\n \" and objects potentially created\")\n\n def handle(self, *args, **options):\n verbosity = options.get('verbosity')\n encoding = options.get('encoding')\n file_path = options.get('file_path')\n structure = options.get('structure')\n name_column = options.get('name')\n srid = options.get('srid')\n do_intersect = options.get('intersect')\n comments_columns = options.get('comment')\n fail = options.get('fail')\n dry = options.get('dry')\n\n if dry:\n fail = True\n\n counter = 0\n counter_fail = 0\n\n if structure:\n try:\n structure = Structure.objects.get(name=structure)\n except Structure.DoesNotExist:\n raise CommandError(\"Structure does not match with instance's structures\\n\"\n \"Change your option --structure\")\n elif Structure.objects.count() == 1:\n structure = Structure.objects.first()\n else:\n raise CommandError(\"There are more than 1 structure and you didn't define the option structure\\n\"\n \"Use --structure to define it\")\n if verbosity > 0:\n self.stdout.write(\"All paths in DataSource will be linked to the structure : %s\" % structure)\n\n ds = DataSource(file_path, encoding=encoding)\n\n bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT)\n bbox.srid = settings.SRID\n\n sid = transaction.savepoint()\n\n for layer in ds:\n for feat in layer:\n name = feat.get(name_column) if name_column in layer.fields else ''\n comment_final_tab = []\n if comments_columns:\n for comment_column in comments_columns:\n if comment_column in layer.fields:\n comment_final_tab.append(feat.get(comment_column))\n geom = feat.geom.geos\n if not isinstance(geom, LineString):\n if verbosity > 0:\n self.stdout.write(\"%s's geometry is not a Linestring\" % feat)\n break\n self.check_srid(srid, geom)\n geom.dim = 2\n if do_intersect and bbox.intersects(geom) or not do_intersect and geom.within(bbox):\n try:\n with transaction.atomic():\n comment_final = '</br>'.join(comment_final_tab)\n path = Path.objects.create(name=name,\n structure=structure,\n geom=geom,\n comments=comment_final)\n counter += 1\n if verbosity > 0:\n self.stdout.write('Create path with pk : {}'.format(path.pk))\n if verbosity > 1:\n self.stdout.write(\"The comment %s was added on %s\" % (comment_final, name))\n except (IntegrityError, InternalError):\n if fail:\n counter_fail += 1\n self.stdout.write('Integrity Error on path : {}, {}'.format(name, geom))\n else:\n raise\n if not dry:\n transaction.savepoint_commit(sid)\n if verbosity >= 2:\n self.stdout.write(self.style.NOTICE(\n \"{0} objects created, {1} objects failed\".format(counter, counter_fail)))\n else:\n transaction.savepoint_rollback(sid)\n self.stdout.write(self.style.NOTICE(\n \"{0} objects will be create, {1} objects failed;\".format(counter, counter_fail)))\n\n def check_srid(self, srid, geom):\n if not geom.srid:\n geom.srid = srid\n if geom.srid != settings.SRID:\n try:\n geom.transform(settings.SRID)\n except GDALException:\n raise CommandError(\"SRID is not well configurate, change/add option srid\")\n", "path": "geotrek/core/management/commands/loadpaths.py"}], "after_files": [{"content": "from django.contrib.gis.gdal import DataSource, GDALException\nfrom geotrek.core.models import Path\nfrom geotrek.authent.models import Structure\nfrom django.contrib.gis.geos.collections import Polygon, LineString\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom django.db.utils import IntegrityError, InternalError\nfrom django.db import transaction\n\n\nclass Command(BaseCommand):\n help = 
'Load Paths from a file within the spatial extent\\n'\n\n def add_arguments(self, parser):\n parser.add_argument('file_path', help=\"File's path of the paths\")\n parser.add_argument('--structure', action='store', dest='structure', help=\"Define the structure\")\n parser.add_argument('--name-attribute', '-n', action='store', dest='name', default='nom',\n help=\"Name of the name's attribute inside the file\")\n parser.add_argument('--comments-attribute', '-c', nargs='*', action='store', dest='comment',\n help=\"\")\n parser.add_argument('--encoding', '-e', action='store', dest='encoding', default='utf-8',\n help='File encoding, default utf-8')\n parser.add_argument('--srid', '-s', action='store', dest='srid', default=4326, type=int,\n help=\"File's SRID\")\n parser.add_argument('--intersect', '-i', action='store_true', dest='intersect', default=False,\n help=\"Check paths intersect spatial extent and not only within\")\n parser.add_argument('--fail', '-f', action='store_true', dest='fail', default=False,\n help=\"Allows to grant fails\")\n parser.add_argument('--dry', '-d', action='store_true', dest='dry', default=False,\n help=\"Do not change the database, dry run. Show the number of fail\"\n \" and objects potentially created\")\n\n def handle(self, *args, **options):\n verbosity = options.get('verbosity')\n encoding = options.get('encoding')\n file_path = options.get('file_path')\n structure = options.get('structure')\n name_column = options.get('name')\n srid = options.get('srid')\n self.do_intersect = options.get('intersect')\n comments_columns = options.get('comment')\n fail = options.get('fail')\n dry = options.get('dry')\n\n if dry:\n fail = True\n\n counter = 0\n counter_fail = 0\n\n if structure:\n try:\n structure = Structure.objects.get(name=structure)\n except Structure.DoesNotExist:\n raise CommandError(\"Structure does not match with instance's structures\\n\"\n \"Change your option --structure\")\n elif Structure.objects.count() == 1:\n structure = Structure.objects.first()\n else:\n raise CommandError(\"There are more than 1 structure and you didn't define the option structure\\n\"\n \"Use --structure to define it\")\n if verbosity > 0:\n self.stdout.write(\"All paths in DataSource will be linked to the structure : %s\" % structure)\n\n ds = DataSource(file_path, encoding=encoding)\n\n self.bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT)\n self.bbox.srid = settings.SRID\n\n sid = transaction.savepoint()\n\n for layer in ds:\n for feat in layer:\n name = feat.get(name_column) if name_column in layer.fields else ''\n comment_final_tab = []\n if comments_columns:\n for comment_column in comments_columns:\n if comment_column in layer.fields:\n comment_final_tab.append(feat.get(comment_column))\n geom = feat.geom.geos\n if not isinstance(geom, LineString):\n if verbosity > 0:\n self.stdout.write(\"%s's geometry is not a Linestring\" % feat)\n break\n self.check_srid(srid, geom)\n geom.dim = 2\n if self.should_import(feat, geom):\n try:\n with transaction.atomic():\n comment_final = '</br>'.join(comment_final_tab)\n path = Path.objects.create(name=name,\n structure=structure,\n geom=geom,\n comments=comment_final)\n counter += 1\n if verbosity > 0:\n self.stdout.write('Create path with pk : {}'.format(path.pk))\n if verbosity > 1:\n self.stdout.write(\"The comment %s was added on %s\" % (comment_final, name))\n except (IntegrityError, InternalError):\n if fail:\n counter_fail += 1\n self.stdout.write('Integrity Error on path : {}, {}'.format(name, geom))\n else:\n raise\n if 
not dry:\n transaction.savepoint_commit(sid)\n if verbosity >= 2:\n self.stdout.write(self.style.NOTICE(\n \"{0} objects created, {1} objects failed\".format(counter, counter_fail)))\n else:\n transaction.savepoint_rollback(sid)\n self.stdout.write(self.style.NOTICE(\n \"{0} objects will be create, {1} objects failed;\".format(counter, counter_fail)))\n\n def check_srid(self, srid, geom):\n if not geom.srid:\n geom.srid = srid\n if geom.srid != settings.SRID:\n try:\n geom.transform(settings.SRID)\n except GDALException:\n raise CommandError(\"SRID is not well configurate, change/add option srid\")\n\n def should_import(self, feature, geom):\n return (\n self.do_intersect and self.bbox.intersects(geom)\n or not self.do_intersect and geom.within(self.bbox)\n )\n", "path": "geotrek/core/management/commands/loadpaths.py"}]} | 1,762 | 405 |
gh_patches_debug_33518 | rasdani/github-patches | git_diff | qtile__qtile-1696 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python compatibility of the timezone parameter for widget.Clock
# Issue description
The following widget configuration doesn't work for python 3.8.2:
```
widget.Clock( format="%H:%M:%S", timezone="Asia/Taipei")
```
I made a workaround for this:
```
from dateutil.tz import *
widget.Clock( format="%H:%M:%S", timezone=gettz("Asia/Taipei"))
```
This error is related to the code snippets in `libqtile/widget/clock.py`:
```
def poll(self):
if self.timezone:
now = datetime.now(timezone.utc).astimezone(self.timezone)
else:
now = datetime.now(timezone.utc).astimezone()
return (now + self.DELTA).strftime(self.format)
```
It seems Python 3.6+ has a compatibility issue with the timezone parameter: the standard library on its own doesn't accept timezone names like "Asia/Tokyo" or "Europe/Warsaw". For now I include `dateutil` to work around the error.
# Qtile version
qtile 0.15.1-1 (ArchLinux)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/widget/clock.py`
Content:
```
1 # Copyright (c) 2010 Aldo Cortesi
2 # Copyright (c) 2012 Andrew Grigorev
3 # Copyright (c) 2014 Sean Vig
4 # Copyright (c) 2014 Tycho Andersen
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 # SOFTWARE.
23
24 import sys
25 import time
26 from datetime import datetime, timedelta, timezone
27
28 from libqtile.log_utils import logger
29 from libqtile.widget import base
30
31 try:
32 import pytz
33 except ImportError:
34 pass
35
36
37 class Clock(base.InLoopPollText):
38 """A simple but flexible text-based clock"""
39 orientations = base.ORIENTATION_HORIZONTAL
40 defaults = [
41 ('format', '%H:%M', 'A Python datetime format string'),
42 ('update_interval', 1., 'Update interval for the clock'),
43 ('timezone', None, 'The timezone to use for this clock, either as'
44 ' string if pytz is installed (e.g. "US/Central" or anything in'
45 ' /usr/share/zoneinfo), or as tzinfo (e.g. datetime.timezone.utc).'
46 ' None means the system local timezone and is the default.')
47 ]
48 DELTA = timedelta(seconds=0.5)
49
50 def __init__(self, **config):
51 base.InLoopPollText.__init__(self, **config)
52 self.add_defaults(Clock.defaults)
53 if isinstance(self.timezone, str):
54 if "pytz" in sys.modules:
55 self.timezone = pytz.timezone(self.timezone)
56 else:
57 logger.warning('Clock widget can not infer its timezone from a'
58 ' string without the pytz library. Install pytz'
59 ' or give it a datetime.tzinfo instance.')
60 if self.timezone is None:
61 logger.info('Defaulting to the system local timezone.')
62
63 def tick(self):
64 self.update(self.poll())
65 return self.update_interval - time.time() % self.update_interval
66
67 # adding .5 to get a proper seconds value because glib could
68 # theoreticaly call our method too early and we could get something
69 # like (x-1).999 instead of x.000
70 def poll(self):
71 if self.timezone:
72 now = datetime.now(timezone.utc).astimezone(self.timezone)
73 else:
74 now = datetime.now(timezone.utc).astimezone()
75 return (now + self.DELTA).strftime(self.format)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/widget/clock.py b/libqtile/widget/clock.py
--- a/libqtile/widget/clock.py
+++ b/libqtile/widget/clock.py
@@ -33,6 +33,11 @@
except ImportError:
pass
+try:
+ import dateutil.tz
+except ImportError:
+ pass
+
class Clock(base.InLoopPollText):
"""A simple but flexible text-based clock"""
@@ -41,9 +46,10 @@
('format', '%H:%M', 'A Python datetime format string'),
('update_interval', 1., 'Update interval for the clock'),
('timezone', None, 'The timezone to use for this clock, either as'
- ' string if pytz is installed (e.g. "US/Central" or anything in'
- ' /usr/share/zoneinfo), or as tzinfo (e.g. datetime.timezone.utc).'
- ' None means the system local timezone and is the default.')
+ ' string if pytz or dateutil is installed (e.g. "US/Central" or'
+ ' anything in /usr/share/zoneinfo), or as tzinfo (e.g.'
+ ' datetime.timezone.utc). None means the system local timezone and is'
+ ' the default.')
]
DELTA = timedelta(seconds=0.5)
@@ -53,10 +59,13 @@
if isinstance(self.timezone, str):
if "pytz" in sys.modules:
self.timezone = pytz.timezone(self.timezone)
+ elif "dateutil" in sys.modules:
+ self.timezone = dateutil.tz.gettz(self.timezone)
else:
logger.warning('Clock widget can not infer its timezone from a'
- ' string without the pytz library. Install pytz'
- ' or give it a datetime.tzinfo instance.')
+ ' string without pytz or dateutil. Install one'
+ ' of these libraries, or give it a'
+ ' datetime.tzinfo instance.')
if self.timezone is None:
logger.info('Defaulting to the system local timezone.')
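As a usage note (illustrative config code, not part of the patch): with the change above, a plain timezone name works whenever either `pytz` or `python-dateutil` is importable in qtile's environment, and passing a ready-made `tzinfo` keeps working without any extra library. The `from libqtile import widget` line is the usual qtile config import and is assumed here:

```python
from datetime import timedelta, timezone

from libqtile import widget

# A string timezone is now resolved through pytz if available,
# otherwise through dateutil.tz.gettz().
clock = widget.Clock(format="%H:%M:%S", timezone="Asia/Taipei")

# A tzinfo object still works and needs no third-party library at all.
clock_fixed_offset = widget.Clock(format="%H:%M:%S",
                                  timezone=timezone(timedelta(hours=8)))
```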
| {"golden_diff": "diff --git a/libqtile/widget/clock.py b/libqtile/widget/clock.py\n--- a/libqtile/widget/clock.py\n+++ b/libqtile/widget/clock.py\n@@ -33,6 +33,11 @@\n except ImportError:\n pass\n \n+try:\n+ import dateutil.tz\n+except ImportError:\n+ pass\n+\n \n class Clock(base.InLoopPollText):\n \"\"\"A simple but flexible text-based clock\"\"\"\n@@ -41,9 +46,10 @@\n ('format', '%H:%M', 'A Python datetime format string'),\n ('update_interval', 1., 'Update interval for the clock'),\n ('timezone', None, 'The timezone to use for this clock, either as'\n- ' string if pytz is installed (e.g. \"US/Central\" or anything in'\n- ' /usr/share/zoneinfo), or as tzinfo (e.g. datetime.timezone.utc).'\n- ' None means the system local timezone and is the default.')\n+ ' string if pytz or dateutil is installed (e.g. \"US/Central\" or'\n+ ' anything in /usr/share/zoneinfo), or as tzinfo (e.g.'\n+ ' datetime.timezone.utc). None means the system local timezone and is'\n+ ' the default.')\n ]\n DELTA = timedelta(seconds=0.5)\n \n@@ -53,10 +59,13 @@\n if isinstance(self.timezone, str):\n if \"pytz\" in sys.modules:\n self.timezone = pytz.timezone(self.timezone)\n+ elif \"dateutil\" in sys.modules:\n+ self.timezone = dateutil.tz.gettz(self.timezone)\n else:\n logger.warning('Clock widget can not infer its timezone from a'\n- ' string without the pytz library. Install pytz'\n- ' or give it a datetime.tzinfo instance.')\n+ ' string without pytz or dateutil. Install one'\n+ ' of these libraries, or give it a'\n+ ' datetime.tzinfo instance.')\n if self.timezone is None:\n logger.info('Defaulting to the system local timezone.')\n", "issue": "python compatibility about timezone parameter for widget.Clock\n# Issue description\r\n\r\nThe following widget configuration doesn't work for python 3.8.2:\r\n```\r\nwidget.Clock( format=\"%H:%M:%S\", timezone=\"Asia/Taipei\")\r\n```\r\n\r\nI made a workaround for this:\r\n```\r\nfrom dateutil.tz import *\r\nwidget.Clock( format=\"%H:%M:%S\", timezone=gettz(\"Asia/Taipei\"))\r\n```\r\n\r\nThis error is related to the code snippets in `libqtile/widget/clock.py`:\r\n```\r\n def poll(self):\r\n if self.timezone:\r\n now = datetime.now(timezone.utc).astimezone(self.timezone)\r\n else:\r\n now = datetime.now(timezone.utc).astimezone()\r\n return (now + self.DELTA).strftime(self.format)\r\n```\r\n\r\nIt seems python 3.6+ has compatibility issue of timezone parameters, and native python doesn't support timezone locale like \"Asia/Tokyo\",\"Europe/Warsaw\", ... or so. 
Currently I include `dateutil` to bypass the syntax error\r\n\r\n\r\n# Qtile version\r\nqtile 0.15.1-1 (ArchLinux)\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2010 Aldo Cortesi\n# Copyright (c) 2012 Andrew Grigorev\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport sys\nimport time\nfrom datetime import datetime, timedelta, timezone\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\ntry:\n import pytz\nexcept ImportError:\n pass\n\n\nclass Clock(base.InLoopPollText):\n \"\"\"A simple but flexible text-based clock\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n ('format', '%H:%M', 'A Python datetime format string'),\n ('update_interval', 1., 'Update interval for the clock'),\n ('timezone', None, 'The timezone to use for this clock, either as'\n ' string if pytz is installed (e.g. \"US/Central\" or anything in'\n ' /usr/share/zoneinfo), or as tzinfo (e.g. datetime.timezone.utc).'\n ' None means the system local timezone and is the default.')\n ]\n DELTA = timedelta(seconds=0.5)\n\n def __init__(self, **config):\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(Clock.defaults)\n if isinstance(self.timezone, str):\n if \"pytz\" in sys.modules:\n self.timezone = pytz.timezone(self.timezone)\n else:\n logger.warning('Clock widget can not infer its timezone from a'\n ' string without the pytz library. 
Install pytz'\n ' or give it a datetime.tzinfo instance.')\n if self.timezone is None:\n logger.info('Defaulting to the system local timezone.')\n\n def tick(self):\n self.update(self.poll())\n return self.update_interval - time.time() % self.update_interval\n\n # adding .5 to get a proper seconds value because glib could\n # theoreticaly call our method too early and we could get something\n # like (x-1).999 instead of x.000\n def poll(self):\n if self.timezone:\n now = datetime.now(timezone.utc).astimezone(self.timezone)\n else:\n now = datetime.now(timezone.utc).astimezone()\n return (now + self.DELTA).strftime(self.format)\n", "path": "libqtile/widget/clock.py"}], "after_files": [{"content": "# Copyright (c) 2010 Aldo Cortesi\n# Copyright (c) 2012 Andrew Grigorev\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport sys\nimport time\nfrom datetime import datetime, timedelta, timezone\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\ntry:\n import pytz\nexcept ImportError:\n pass\n\ntry:\n import dateutil.tz\nexcept ImportError:\n pass\n\n\nclass Clock(base.InLoopPollText):\n \"\"\"A simple but flexible text-based clock\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n ('format', '%H:%M', 'A Python datetime format string'),\n ('update_interval', 1., 'Update interval for the clock'),\n ('timezone', None, 'The timezone to use for this clock, either as'\n ' string if pytz or dateutil is installed (e.g. \"US/Central\" or'\n ' anything in /usr/share/zoneinfo), or as tzinfo (e.g.'\n ' datetime.timezone.utc). None means the system local timezone and is'\n ' the default.')\n ]\n DELTA = timedelta(seconds=0.5)\n\n def __init__(self, **config):\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(Clock.defaults)\n if isinstance(self.timezone, str):\n if \"pytz\" in sys.modules:\n self.timezone = pytz.timezone(self.timezone)\n elif \"dateutil\" in sys.modules:\n self.timezone = dateutil.tz.gettz(self.timezone)\n else:\n logger.warning('Clock widget can not infer its timezone from a'\n ' string without pytz or dateutil. 
Install one'\n ' of these libraries, or give it a'\n ' datetime.tzinfo instance.')\n if self.timezone is None:\n logger.info('Defaulting to the system local timezone.')\n\n def tick(self):\n self.update(self.poll())\n return self.update_interval - time.time() % self.update_interval\n\n # adding .5 to get a proper seconds value because glib could\n # theoreticaly call our method too early and we could get something\n # like (x-1).999 instead of x.000\n def poll(self):\n if self.timezone:\n now = datetime.now(timezone.utc).astimezone(self.timezone)\n else:\n now = datetime.now(timezone.utc).astimezone()\n return (now + self.DELTA).strftime(self.format)\n", "path": "libqtile/widget/clock.py"}]} | 1,376 | 463 |
gh_patches_debug_401 | rasdani/github-patches | git_diff | getmoto__moto-698 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to create a key with a trailing slash using OrdinaryCallingFormat
When using OrdinaryCallingFormat, it's not possible to create a key ending with a slash (e.g. when mimicking directory creation), since this is stripped off when parsing the key name. I can't comment on S3, but this is at least different behaviour from Ceph.
For example, the below fails as is, but works if the connection uses SubdomainCallingFormat instead.
```
import boto
import moto
import unittest
class TestCreatingKeyEndingWithSlash(unittest.TestCase):
@moto.mock_s3
def test_ordinary_calling_format(self):
bucket_name = 'testbucket'
key_name = 'key_ending_with_slash/'
conn = boto.connect_s3('access_key', 'secret_key',
calling_format=boto.s3.connection.OrdinaryCallingFormat())
bucket = conn.create_bucket(bucket_name)
key = boto.s3.key.Key(bucket)
key.key = key_name
key.set_contents_from_string('')
self.assertIn(key_name, [k.name for k in bucket.get_all_keys()])
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/s3bucket_path/utils.py`
Content:
```
1 from __future__ import unicode_literals
2 from six.moves.urllib.parse import urlparse
3
4
5 def bucket_name_from_url(url):
6 pth = urlparse(url).path.lstrip("/")
7
8 l = pth.lstrip("/").split("/")
9 if len(l) == 0 or l[0] == "":
10 return None
11 return l[0]
12
13
14 def parse_key_name(path):
15 return "/".join(path.rstrip("/").split("/")[2:])
16
17
18 def is_delete_keys(request, path, bucket_name):
19 return (
20 path == u'/' + bucket_name + u'/?delete' or
21 path == u'/' + bucket_name + u'?delete' or
22 (path == u'/' + bucket_name and
23 getattr(request, "query_string", "") == "delete")
24 )
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/moto/s3bucket_path/utils.py b/moto/s3bucket_path/utils.py
--- a/moto/s3bucket_path/utils.py
+++ b/moto/s3bucket_path/utils.py
@@ -12,7 +12,7 @@
def parse_key_name(path):
- return "/".join(path.rstrip("/").split("/")[2:])
+ return "/".join(path.split("/")[2:])
def is_delete_keys(request, path, bucket_name):
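The behavioural change in `parse_key_name` can be checked in isolation; the path below is the bucket-in-path form that the issue's example key would produce and is used purely for illustration:

```python
# Standalone demonstration of the one-line fix; no moto import required.
path = "/testbucket/key_ending_with_slash/"

old_key = "/".join(path.rstrip("/").split("/")[2:])  # previous behaviour
new_key = "/".join(path.split("/")[2:])              # patched behaviour

assert old_key == "key_ending_with_slash"    # trailing slash silently lost
assert new_key == "key_ending_with_slash/"   # key name preserved verbatim
```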
| {"golden_diff": "diff --git a/moto/s3bucket_path/utils.py b/moto/s3bucket_path/utils.py\n--- a/moto/s3bucket_path/utils.py\n+++ b/moto/s3bucket_path/utils.py\n@@ -12,7 +12,7 @@\n \n \n def parse_key_name(path):\n- return \"/\".join(path.rstrip(\"/\").split(\"/\")[2:])\n+ return \"/\".join(path.split(\"/\")[2:])\n \n \n def is_delete_keys(request, path, bucket_name):\n", "issue": "Unable to create a key with a trailing slash using OrdinaryCallingFormat\nWhen using OrdinaryCallingFormat, it's not possible to create a key ending with a slash (e.g. when mimicking directory creation), since this is stripped off when parsing the key name. I can't comment on S3, but this is at least different behaviour from Ceph.\n\nFor example, the below fails as is, but works if the connection uses SubdomainCallingFormat instead.\n\n```\nimport boto\nimport moto\nimport unittest\n\n\nclass TestCreatingKeyEndingWithSlash(unittest.TestCase):\n\n @moto.mock_s3\n def test_ordinary_calling_format(self):\n bucket_name = 'testbucket'\n key_name = 'key_ending_with_slash/'\n\n conn = boto.connect_s3('access_key', 'secret_key',\n calling_format=boto.s3.connection.OrdinaryCallingFormat())\n bucket = conn.create_bucket(bucket_name)\n\n key = boto.s3.key.Key(bucket)\n key.key = key_name\n key.set_contents_from_string('')\n\n self.assertIn(key_name, [k.name for k in bucket.get_all_keys()])\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom six.moves.urllib.parse import urlparse\n\n\ndef bucket_name_from_url(url):\n pth = urlparse(url).path.lstrip(\"/\")\n\n l = pth.lstrip(\"/\").split(\"/\")\n if len(l) == 0 or l[0] == \"\":\n return None\n return l[0]\n\n\ndef parse_key_name(path):\n return \"/\".join(path.rstrip(\"/\").split(\"/\")[2:])\n\n\ndef is_delete_keys(request, path, bucket_name):\n return (\n path == u'/' + bucket_name + u'/?delete' or\n path == u'/' + bucket_name + u'?delete' or\n (path == u'/' + bucket_name and\n getattr(request, \"query_string\", \"\") == \"delete\")\n )\n", "path": "moto/s3bucket_path/utils.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nfrom six.moves.urllib.parse import urlparse\n\n\ndef bucket_name_from_url(url):\n pth = urlparse(url).path.lstrip(\"/\")\n\n l = pth.lstrip(\"/\").split(\"/\")\n if len(l) == 0 or l[0] == \"\":\n return None\n return l[0]\n\n\ndef parse_key_name(path):\n return \"/\".join(path.split(\"/\")[2:])\n\n\ndef is_delete_keys(request, path, bucket_name):\n return (\n path == u'/' + bucket_name + u'/?delete' or\n path == u'/' + bucket_name + u'?delete' or\n (path == u'/' + bucket_name and\n getattr(request, \"query_string\", \"\") == \"delete\")\n )\n", "path": "moto/s3bucket_path/utils.py"}]} | 709 | 102 |
gh_patches_debug_14293 | rasdani/github-patches | git_diff | psychopy__psychopy-569 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feature request: develop a pylint rule-set
pylint is a code analysis tool, and will err on the side of being super duper ultra nitpicky (which is great). You then just turn some things off to see the signal in the noise. For example, I've been bitten by mutable default values to a method / function, and it will catch this. It flags bare excepts -- lots of useful stuff.
If anyone has experience with pylint, it would be great to have advice on what works well, and what is likely to work well for PsychoPy given its history and current conventions. If it's counterproductive to start using pylint with a codebase this large, that would be helpful to know.
I'm thinking that even if it's never run as part of the build process, it might be nice to have a project-wide pylintrc file that makes explicit what style conventions are expected (long lines OK, variable-name conventions, etc.). This seems like a powerful way to communicate the conventions.
PsychoPy currently has lots of bare excepts, bad indentations, unused variables, redefined builtins, unused imports, and so on -- seemingly all good targets for clean-up work.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/misc.py`
Content:
```
1 #!/usr/bin/env python2
2
3 # Part of the PsychoPy library
4 # Copyright (C) 2014 Jonathan Peirce
5 # Distributed under the terms of the GNU General Public License (GPL).
6
7 '''Wrapper for all miscellaneous functions and classes from psychopy.tools'''
8
9 from psychopy.tools.arraytools import (createXYs, extendArr, makeRadialMatrix,
10 ratioRange, shuffleArray, val2array)
11
12 from psychopy.tools.attributetools import attributeSetter, setWithOperation
13
14 from psychopy.tools.colorspacetools import (dkl2rgb, dklCart2rgb,
15 hsv2rgb, lms2rgb,
16 rgb2dklCart, rgb2lms)
17
18 from psychopy.tools.coordinatetools import (cart2pol, pol2cart,
19 cart2sph, sph2cart)
20
21 from psychopy.tools.fileerrortools import handleFileCollision
22
23 from psychopy.tools.filetools import toFile, fromFile, mergeFolder
24
25 from psychopy.tools.imagetools import array2image, image2array, makeImageAuto
26
27 from psychopy.tools.monitorunittools import (cm2deg, deg2cm, cm2pix, pix2cm,
28 deg2pix, pix2deg, convertToPix)
29
30 from psychopy.tools.plottools import plotFrameIntervals
31
32 from psychopy.tools.typetools import float_uint8, float_uint16, uint8_float
33
34 from numpy import radians, degrees
35
```
Path: `psychopy/tools/unittools.py`
Content:
```
1 #!/usr/bin/env python2
2
3 # Part of the PsychoPy library
4 # Copyright (C) 2014 Jonathan Peirce
5 # Distributed under the terms of the GNU General Public License (GPL).
6
7 '''Functions and classes related to unit conversion'''
8
9 # This module is not used by psychopy; retained for backwards compatibility
10 # for user-scripts.
11
12 from numpy import radians, degrees
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/psychopy/misc.py b/psychopy/misc.py
--- a/psychopy/misc.py
+++ b/psychopy/misc.py
@@ -6,6 +6,9 @@
'''Wrapper for all miscellaneous functions and classes from psychopy.tools'''
+# pylint: disable=W0611
+# W0611 = Unused import %s
+
from psychopy.tools.arraytools import (createXYs, extendArr, makeRadialMatrix,
ratioRange, shuffleArray, val2array)
diff --git a/psychopy/tools/unittools.py b/psychopy/tools/unittools.py
--- a/psychopy/tools/unittools.py
+++ b/psychopy/tools/unittools.py
@@ -9,4 +9,7 @@
# This module is not used by psychopy; retained for backwards compatibility
# for user-scripts.
+# pylint: disable=W0611
+# W0611 = Unused import %s
+
from numpy import radians, degrees
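As a side note to the issue's examples: the mutable-default-argument pitfall mentioned there (pylint message W0102, `dangerous-default-value`) is easy to reproduce. The snippet below is a generic, self-contained illustration, not PsychoPy code:

```python
def record_bad(value, history=[]):      # pylint would flag W0102 here
    history.append(value)
    return history


def record_good(value, history=None):   # the conventional fix
    if history is None:
        history = []
    history.append(value)
    return history


assert record_bad(1) == [1]
assert record_bad(2) == [1, 2]          # surprise: one shared default list
assert record_good(1) == [1]
assert record_good(2) == [2]            # each call gets a fresh list
```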
| {"golden_diff": "diff --git a/psychopy/misc.py b/psychopy/misc.py\n--- a/psychopy/misc.py\n+++ b/psychopy/misc.py\n@@ -6,6 +6,9 @@\n \n '''Wrapper for all miscellaneous functions and classes from psychopy.tools'''\n \n+# pylint: disable=W0611\n+# W0611 = Unused import %s\n+\n from psychopy.tools.arraytools import (createXYs, extendArr, makeRadialMatrix,\n ratioRange, shuffleArray, val2array)\n \ndiff --git a/psychopy/tools/unittools.py b/psychopy/tools/unittools.py\n--- a/psychopy/tools/unittools.py\n+++ b/psychopy/tools/unittools.py\n@@ -9,4 +9,7 @@\n # This module is not used by psychopy; retained for backwards compatibility\n # for user-scripts.\n \n+# pylint: disable=W0611\n+# W0611 = Unused import %s\n+\n from numpy import radians, degrees\n", "issue": "feature request: develop a pylint rule-set\npylint is a code analysis tool, and will err of the side of being super duper ultra nitpicky (which is great). You just then turn some things off to see the signal in the noise. For example, I've been bitten by mutable default values to a method / function, and it will catch this. It flags bare excepts -- lots of useful stuff.\n\nIf anyone has experience with pylint, it would be great to have advice on what works well, and what is likely to work well for PsychoPy given its history and current conventions. If its counterproductive to start using pylint with a codebase this large, that would be helpful to know.\n\nI'm thinking that even if its never run as part of the build process, it might be nice to have a project-wide pylintrc file that makes explicit what style conventions are expected (long lines ok, variable name conventions, etc). This seems like a powerful way to communicate the conventions. \n\nPsychoPy currently has lots of bare excepts, bad indentations, unused variables, redefined builtins, unused imports, and so on -- seemingly all good targets for clean-up work.\n\n", "before_files": [{"content": "#!/usr/bin/env python2\n\n# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n'''Wrapper for all miscellaneous functions and classes from psychopy.tools'''\n\nfrom psychopy.tools.arraytools import (createXYs, extendArr, makeRadialMatrix,\n ratioRange, shuffleArray, val2array)\n\nfrom psychopy.tools.attributetools import attributeSetter, setWithOperation\n\nfrom psychopy.tools.colorspacetools import (dkl2rgb, dklCart2rgb,\n hsv2rgb, lms2rgb,\n rgb2dklCart, rgb2lms)\n\nfrom psychopy.tools.coordinatetools import (cart2pol, pol2cart,\n cart2sph, sph2cart)\n\nfrom psychopy.tools.fileerrortools import handleFileCollision\n\nfrom psychopy.tools.filetools import toFile, fromFile, mergeFolder\n\nfrom psychopy.tools.imagetools import array2image, image2array, makeImageAuto\n\nfrom psychopy.tools.monitorunittools import (cm2deg, deg2cm, cm2pix, pix2cm,\n deg2pix, pix2deg, convertToPix)\n\nfrom psychopy.tools.plottools import plotFrameIntervals\n\nfrom psychopy.tools.typetools import float_uint8, float_uint16, uint8_float\n\nfrom numpy import radians, degrees\n", "path": "psychopy/misc.py"}, {"content": "#!/usr/bin/env python2\n\n# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n'''Functions and classes related to unit conversion'''\n\n# This module is not used by psychopy; retained for backwards compatibility\n# for user-scripts.\n\nfrom numpy import radians, degrees\n", "path": "psychopy/tools/unittools.py"}], 
"after_files": [{"content": "#!/usr/bin/env python2\n\n# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n'''Wrapper for all miscellaneous functions and classes from psychopy.tools'''\n\n# pylint: disable=W0611\n# W0611 = Unused import %s\n\nfrom psychopy.tools.arraytools import (createXYs, extendArr, makeRadialMatrix,\n ratioRange, shuffleArray, val2array)\n\nfrom psychopy.tools.attributetools import attributeSetter, setWithOperation\n\nfrom psychopy.tools.colorspacetools import (dkl2rgb, dklCart2rgb,\n hsv2rgb, lms2rgb,\n rgb2dklCart, rgb2lms)\n\nfrom psychopy.tools.coordinatetools import (cart2pol, pol2cart,\n cart2sph, sph2cart)\n\nfrom psychopy.tools.fileerrortools import handleFileCollision\n\nfrom psychopy.tools.filetools import toFile, fromFile, mergeFolder\n\nfrom psychopy.tools.imagetools import array2image, image2array, makeImageAuto\n\nfrom psychopy.tools.monitorunittools import (cm2deg, deg2cm, cm2pix, pix2cm,\n deg2pix, pix2deg, convertToPix)\n\nfrom psychopy.tools.plottools import plotFrameIntervals\n\nfrom psychopy.tools.typetools import float_uint8, float_uint16, uint8_float\n\nfrom numpy import radians, degrees\n", "path": "psychopy/misc.py"}, {"content": "#!/usr/bin/env python2\n\n# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n'''Functions and classes related to unit conversion'''\n\n# This module is not used by psychopy; retained for backwards compatibility\n# for user-scripts.\n\n# pylint: disable=W0611\n# W0611 = Unused import %s\n\nfrom numpy import radians, degrees\n", "path": "psychopy/tools/unittools.py"}]} | 999 | 213 |
gh_patches_debug_10732 | rasdani/github-patches | git_diff | streamlink__streamlink-5376 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.mediavitrina: no playable streams found on player URLs
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
Since January, streamlink has been unable to handle Gazprom-Media mediavitrina URLs like:
https://player.mediavitrina.ru/gpm_tnt_v2/tnt/vitrinatv_web/player.html
https://player.mediavitrina.ru/gpm_friday_v2/friday/vitrinatv_web/player.html
https://player.mediavitrina.ru/tv3_v2/tv3/vitrinatv_web/player.html
The reason for this is that mediavitrina can't open a required JSON file like
https://media.mediavitrina.ru/api/v3/gpm-tnt/playlist/tnt_as_array.json?application_id=&player_referer_hostname=vitrina.tv&config_checksum_sha256=&egress_version_id=1950111
What I know:
When I try to open this JSON file directly in a browser it fails, but when I specify the referer "https://player.mediavitrina.ru/" for the media.mediavitrina.ru URL using a Firefox extension it opens perfectly.
So I think the mediavitrina plugin does not send this referer when requesting the JSON from the media.mediavitrina.ru URL; it sends the referer only for player.mediavitrina.ru URLs.
Please fix this issue.
P.S.:
It would be future-proof if this plugin could just handle https://media.mediavitrina.ru/api/v1/gpm-tnt/playlist/tnt_as_array.json URLs directly.
### Debug log
```text
[cli][info] Found matching plugin mediavitrina for URL https://player.mediavitrina.ru/gpm_tnt_v2/tnt/vitrinatv_web/player.html
error: No playable streams found on this URL: https://player.mediavitrina.ru/gpm_tnt_v2/tnt/vitrinatv_web/player.html
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/mediavitrina.py`
Content:
```
1 """
2 $description Russian live streaming platform hosting various Russian live TV channels.
3 $url mediavitrina.ru
4 $type live
5 $region Russia
6 """
7
8 import logging
9 import re
10 from urllib.parse import urlparse
11
12 from streamlink.plugin import Plugin, pluginmatcher
13 from streamlink.plugin.api import validate
14 from streamlink.stream.hls import HLSStream
15 from streamlink.utils.url import update_qsd
16
17
18 log = logging.getLogger(__name__)
19
20
21 @pluginmatcher(re.compile(r"""https?://(?:www\.)?(?:
22 chetv
23 |
24 ctc(?:love)?
25 |
26 domashniy
27 )\.ru/(?:live|online)""", re.VERBOSE))
28 @pluginmatcher(re.compile(r"https?://player\.mediavitrina\.ru/.+/player\.html"))
29 class MediaVitrina(Plugin):
30 _re_url_json = re.compile(r"https://media\.mediavitrina\.ru/(?:proxy)?api/v3/\w+/playlist/[\w-]+_as_array\.json[^\"']+")
31
32 def _get_streams(self):
33 self.session.http.headers.update({"Referer": self.url})
34
35 p_netloc = urlparse(self.url).netloc
36 if p_netloc == "player.mediavitrina.ru":
37 # https://player.mediavitrina.ru/
38 url_player = self.url
39 elif p_netloc.endswith("ctc.ru"):
40 # https://ctc.ru/online/
41 url_player = self.session.http.get(
42 "https://ctc.ru/api/page/v1/online/",
43 schema=validate.Schema(
44 validate.parse_json(),
45 {"content": validate.all(
46 [dict],
47 validate.filter(lambda n: n.get("type") == "on-air"),
48 [{"onAirLink": validate.url(netloc="player.mediavitrina.ru")}],
49 validate.get((0, "onAirLink")),
50 )},
51 validate.get("content"),
52 ),
53 )
54 else:
55 # https://chetv.ru/online/
56 # https://ctclove.ru/online/
57 # https://domashniy.ru/online/
58 url_player = self.session.http.get(self.url, schema=validate.Schema(
59 validate.parse_html(),
60 validate.xml_xpath_string(".//iframe[starts-with(@src,'https://player.mediavitrina.ru/')]/@src"),
61 ), acceptable_status=(200, 403, 404))
62
63 if not url_player:
64 return
65
66 log.debug(f"url_player={url_player}")
67 script_data = self.session.http.get(url_player, schema=validate.Schema(
68 validate.parse_html(),
69 validate.xml_xpath_string(".//script[contains(text(),'media.mediavitrina.ru/')]/text()"),
70 ))
71 if not script_data:
72 log.debug("invalid script_data")
73 return
74
75 m = self._re_url_json.search(script_data)
76 if not m:
77 log.debug("invalid url_json")
78 return
79
80 url_json = m.group(0)
81 log.debug(f"url_json={url_json}")
82 url_json = re.sub(r"\{\{PLAYER_REFERER_HOSTNAME\}\}", "mediavitrina.ru", url_json)
83 url_json = re.sub(r"\{\{[A-Za-z_]+\}\}", "", url_json)
84
85 res_token = self.session.http.get(
86 "https://media.mediavitrina.ru/get_token",
87 schema=validate.Schema(
88 validate.parse_json(),
89 {"result": {"token": str}},
90 validate.get("result"),
91 ))
92 url = self.session.http.get(
93 update_qsd(url_json, qsd=res_token),
94 schema=validate.Schema(
95 validate.parse_json(),
96 {"hls": [validate.url()]},
97 validate.get(("hls", 0)),
98 ))
99
100 if not url:
101 return
102
103 if "georestrictions" in url:
104 log.error("Stream is geo-restricted")
105 return
106
107 return HLSStream.parse_variant_playlist(self.session, url, name_fmt="{pixels}_{bitrate}")
108
109
110 __plugin__ = MediaVitrina
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/mediavitrina.py b/src/streamlink/plugins/mediavitrina.py
--- a/src/streamlink/plugins/mediavitrina.py
+++ b/src/streamlink/plugins/mediavitrina.py
@@ -27,7 +27,7 @@
)\.ru/(?:live|online)""", re.VERBOSE))
@pluginmatcher(re.compile(r"https?://player\.mediavitrina\.ru/.+/player\.html"))
class MediaVitrina(Plugin):
- _re_url_json = re.compile(r"https://media\.mediavitrina\.ru/(?:proxy)?api/v3/\w+/playlist/[\w-]+_as_array\.json[^\"']+")
+ _re_url_json = re.compile(r"https://media\.mediavitrina\.ru/(?:proxy)?api/v3/[\w-]+/playlist/[\w-]+_as_array\.json[^\"']+")
def _get_streams(self):
self.session.http.headers.update({"Referer": self.url})
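The essence of the fix above is the wider channel-slug pattern (`[\w-]+` instead of `\w+`) before `/playlist/`. With the playlist URL quoted in the report, the difference can be reproduced in isolation; both patterns are copied from the plugin, and the URL itself may have expired, but only its shape matters here:

```python
import re

# Playlist URL from the report; only its structure is relevant to the regex.
url = ("https://media.mediavitrina.ru/api/v3/gpm-tnt/playlist/tnt_as_array.json"
       "?application_id=&player_referer_hostname=vitrina.tv"
       "&config_checksum_sha256=&egress_version_id=1950111")

old_re = re.compile(r"https://media\.mediavitrina\.ru/(?:proxy)?api/v3/\w+/playlist/[\w-]+_as_array\.json[^\"']+")
new_re = re.compile(r"https://media\.mediavitrina\.ru/(?:proxy)?api/v3/[\w-]+/playlist/[\w-]+_as_array\.json[^\"']+")

assert old_re.search(url) is None        # "gpm-tnt" contains "-", which \w+ stops at
assert new_re.search(url) is not None    # the patched pattern matches the URL
```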
| {"golden_diff": "diff --git a/src/streamlink/plugins/mediavitrina.py b/src/streamlink/plugins/mediavitrina.py\n--- a/src/streamlink/plugins/mediavitrina.py\n+++ b/src/streamlink/plugins/mediavitrina.py\n@@ -27,7 +27,7 @@\n )\\.ru/(?:live|online)\"\"\", re.VERBOSE))\n @pluginmatcher(re.compile(r\"https?://player\\.mediavitrina\\.ru/.+/player\\.html\"))\n class MediaVitrina(Plugin):\n- _re_url_json = re.compile(r\"https://media\\.mediavitrina\\.ru/(?:proxy)?api/v3/\\w+/playlist/[\\w-]+_as_array\\.json[^\\\"']+\")\n+ _re_url_json = re.compile(r\"https://media\\.mediavitrina\\.ru/(?:proxy)?api/v3/[\\w-]+/playlist/[\\w-]+_as_array\\.json[^\\\"']+\")\n \n def _get_streams(self):\n self.session.http.headers.update({\"Referer\": self.url})\n", "issue": "plugins.mediavitrina: no playable streams found on player URLs\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nSince january streamlink can't handle gazprom-media mediavitrina urls like:\r\n\r\nhttps://player.mediavitrina.ru/gpm_tnt_v2/tnt/vitrinatv_web/player.html\r\nhttps://player.mediavitrina.ru/gpm_friday_v2/friday/vitrinatv_web/player.html\r\nhttps://player.mediavitrina.ru/tv3_v2/tv3/vitrinatv_web/player.html\r\n\r\nThe reason for that is beause mediavitrina can't open a required json file like\r\nhttps://media.mediavitrina.ru/api/v3/gpm-tnt/playlist/tnt_as_array.json?application_id=&player_referer_hostname=vitrina.tv&config_checksum_sha256=&egress_version_id=1950111\r\n\r\nwhat i know:\r\nwhen i try to open this json file directly in browser it fails but when i specify a referer \"https://player.mediavitrina.ru/\" for media.mediavitrina.ru url using firefox extension it opens perfectly\r\nso i think mediavitrina plugin does not send this referer requesting json from media.mediavitrina.ru URL, it sends referer only for player.mediavitrina.ru URLs\r\n\r\nplease fix this issue\r\nP.S.:\r\nit would be futureproof if this plugin just could handle https://media.mediavitrina.ru/api/v1/gpm-tnt/playlist/tnt_as_array.json URLs directly\n\n### Debug log\n\n```text\n[cli][info] Found matching plugin mediavitrina for URL https://player.mediavitri\r\nna.ru/gpm_tnt_v2/tnt/vitrinatv_web/player.html\r\nerror: No playable streams found on this URL: https://player.mediavitrina.ru/gpm\r\n_tnt_v2/tnt/vitrinatv_web/player.html\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Russian live streaming platform hosting various Russian live TV channels.\n$url mediavitrina.ru\n$type live\n$region Russia\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"https?://(?:www\\.)?(?:\n chetv\n |\n ctc(?:love)?\n |\n domashniy\n)\\.ru/(?:live|online)\"\"\", 
re.VERBOSE))\n@pluginmatcher(re.compile(r\"https?://player\\.mediavitrina\\.ru/.+/player\\.html\"))\nclass MediaVitrina(Plugin):\n _re_url_json = re.compile(r\"https://media\\.mediavitrina\\.ru/(?:proxy)?api/v3/\\w+/playlist/[\\w-]+_as_array\\.json[^\\\"']+\")\n\n def _get_streams(self):\n self.session.http.headers.update({\"Referer\": self.url})\n\n p_netloc = urlparse(self.url).netloc\n if p_netloc == \"player.mediavitrina.ru\":\n # https://player.mediavitrina.ru/\n url_player = self.url\n elif p_netloc.endswith(\"ctc.ru\"):\n # https://ctc.ru/online/\n url_player = self.session.http.get(\n \"https://ctc.ru/api/page/v1/online/\",\n schema=validate.Schema(\n validate.parse_json(),\n {\"content\": validate.all(\n [dict],\n validate.filter(lambda n: n.get(\"type\") == \"on-air\"),\n [{\"onAirLink\": validate.url(netloc=\"player.mediavitrina.ru\")}],\n validate.get((0, \"onAirLink\")),\n )},\n validate.get(\"content\"),\n ),\n )\n else:\n # https://chetv.ru/online/\n # https://ctclove.ru/online/\n # https://domashniy.ru/online/\n url_player = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//iframe[starts-with(@src,'https://player.mediavitrina.ru/')]/@src\"),\n ), acceptable_status=(200, 403, 404))\n\n if not url_player:\n return\n\n log.debug(f\"url_player={url_player}\")\n script_data = self.session.http.get(url_player, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'media.mediavitrina.ru/')]/text()\"),\n ))\n if not script_data:\n log.debug(\"invalid script_data\")\n return\n\n m = self._re_url_json.search(script_data)\n if not m:\n log.debug(\"invalid url_json\")\n return\n\n url_json = m.group(0)\n log.debug(f\"url_json={url_json}\")\n url_json = re.sub(r\"\\{\\{PLAYER_REFERER_HOSTNAME\\}\\}\", \"mediavitrina.ru\", url_json)\n url_json = re.sub(r\"\\{\\{[A-Za-z_]+\\}\\}\", \"\", url_json)\n\n res_token = self.session.http.get(\n \"https://media.mediavitrina.ru/get_token\",\n schema=validate.Schema(\n validate.parse_json(),\n {\"result\": {\"token\": str}},\n validate.get(\"result\"),\n ))\n url = self.session.http.get(\n update_qsd(url_json, qsd=res_token),\n schema=validate.Schema(\n validate.parse_json(),\n {\"hls\": [validate.url()]},\n validate.get((\"hls\", 0)),\n ))\n\n if not url:\n return\n\n if \"georestrictions\" in url:\n log.error(\"Stream is geo-restricted\")\n return\n\n return HLSStream.parse_variant_playlist(self.session, url, name_fmt=\"{pixels}_{bitrate}\")\n\n\n__plugin__ = MediaVitrina\n", "path": "src/streamlink/plugins/mediavitrina.py"}], "after_files": [{"content": "\"\"\"\n$description Russian live streaming platform hosting various Russian live TV channels.\n$url mediavitrina.ru\n$type live\n$region Russia\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"https?://(?:www\\.)?(?:\n chetv\n |\n ctc(?:love)?\n |\n domashniy\n)\\.ru/(?:live|online)\"\"\", re.VERBOSE))\n@pluginmatcher(re.compile(r\"https?://player\\.mediavitrina\\.ru/.+/player\\.html\"))\nclass MediaVitrina(Plugin):\n _re_url_json = re.compile(r\"https://media\\.mediavitrina\\.ru/(?:proxy)?api/v3/[\\w-]+/playlist/[\\w-]+_as_array\\.json[^\\\"']+\")\n\n def _get_streams(self):\n 
self.session.http.headers.update({\"Referer\": self.url})\n\n p_netloc = urlparse(self.url).netloc\n if p_netloc == \"player.mediavitrina.ru\":\n # https://player.mediavitrina.ru/\n url_player = self.url\n elif p_netloc.endswith(\"ctc.ru\"):\n # https://ctc.ru/online/\n url_player = self.session.http.get(\n \"https://ctc.ru/api/page/v1/online/\",\n schema=validate.Schema(\n validate.parse_json(),\n {\"content\": validate.all(\n [dict],\n validate.filter(lambda n: n.get(\"type\") == \"on-air\"),\n [{\"onAirLink\": validate.url(netloc=\"player.mediavitrina.ru\")}],\n validate.get((0, \"onAirLink\")),\n )},\n validate.get(\"content\"),\n ),\n )\n else:\n # https://chetv.ru/online/\n # https://ctclove.ru/online/\n # https://domashniy.ru/online/\n url_player = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//iframe[starts-with(@src,'https://player.mediavitrina.ru/')]/@src\"),\n ), acceptable_status=(200, 403, 404))\n\n if not url_player:\n return\n\n log.debug(f\"url_player={url_player}\")\n script_data = self.session.http.get(url_player, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'media.mediavitrina.ru/')]/text()\"),\n ))\n if not script_data:\n log.debug(\"invalid script_data\")\n return\n\n m = self._re_url_json.search(script_data)\n if not m:\n log.debug(\"invalid url_json\")\n return\n\n url_json = m.group(0)\n log.debug(f\"url_json={url_json}\")\n url_json = re.sub(r\"\\{\\{PLAYER_REFERER_HOSTNAME\\}\\}\", \"mediavitrina.ru\", url_json)\n url_json = re.sub(r\"\\{\\{[A-Za-z_]+\\}\\}\", \"\", url_json)\n\n res_token = self.session.http.get(\n \"https://media.mediavitrina.ru/get_token\",\n schema=validate.Schema(\n validate.parse_json(),\n {\"result\": {\"token\": str}},\n validate.get(\"result\"),\n ))\n url = self.session.http.get(\n update_qsd(url_json, qsd=res_token),\n schema=validate.Schema(\n validate.parse_json(),\n {\"hls\": [validate.url()]},\n validate.get((\"hls\", 0)),\n ))\n\n if not url:\n return\n\n if \"georestrictions\" in url:\n log.error(\"Stream is geo-restricted\")\n return\n\n return HLSStream.parse_variant_playlist(self.session, url, name_fmt=\"{pixels}_{bitrate}\")\n\n\n__plugin__ = MediaVitrina\n", "path": "src/streamlink/plugins/mediavitrina.py"}]} | 1,918 | 224 |
gh_patches_debug_24058 | rasdani/github-patches | git_diff | avocado-framework__avocado-4726 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Avocado crashed unexpectedly with the SIGINT
When SIGINT is sent to Avocado in the early stages of a run, Avocado crashes.
This happens with both the legacy runner and the nrunner.
```
avocado run /bin/true
JOB ID : ee66540de61211c164d9d9cb5b0e9aaf65dca8a2
JOB LOG : /home/jarichte/avocado/job-results/job-2021-05-25T16.36-ee66540/job.log
^CAvocado crashed unexpectedly:
You can find details in /var/lib/avocado/data/crashes/avocado-traceback-2021-05-25_16:36:38-_m3ikjhl.log
```
```
avocado run --test-runner=nrunner /bin/true
JOB ID : da09a60ab32ff647c79d919781f82db3543e107f
JOB LOG : /home/jarichte/avocado/job-results/job-2021-05-25T15.09-da09a60/job.log
^CAvocado crashed unexpectedly:
You can find details in /var/lib/avocado/data/crashes/avocado-traceback-2021-05-25_15:09:37-my68_dsy.log
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `avocado/core/main.py`
Content:
```
1 # This program is free software; you can redistribute it and/or modify
2 # it under the terms of the GNU General Public License as published by
3 # the Free Software Foundation; specifically version 2 of the License.
4 #
5 # This program is distributed in the hope that it will be useful,
6 # but WITHOUT ANY WARRANTY; without even the implied warranty of
7 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
8 #
9 # See LICENSE for more details.
10 #
11 # Copyright: RedHat 2013-2014
12 # Author: Lucas Meneghel Rodrigues <[email protected]>
13
14
15 import os
16 import sys
17 import tempfile
18 import time
19 import traceback
20
21 try:
22 from avocado.core.settings import settings
23 except ImportError:
24 sys.stderr.write("Unable to import Avocado libraries, please verify "
25 "your installation, and if necessary reinstall it.\n")
26 # This exit code is replicated from avocado/core/exit_codes.py and not
27 # imported because we are dealing with import failures
28 sys.exit(-1)
29
30
31 def get_crash_dir():
32 config = settings.as_dict()
33 crash_dir_path = os.path.join(config.get('datadir.paths.data_dir'),
34 "crashes")
35 try:
36 os.makedirs(crash_dir_path)
37 except OSError:
38 pass
39 return crash_dir_path
40
41
42 def handle_exception(*exc_info):
43 # Print traceback if AVOCADO_LOG_DEBUG environment variable is set
44 msg = "Avocado crashed:\n" + "".join(traceback.format_exception(*exc_info))
45 msg += "\n"
46 if os.environ.get("AVOCADO_LOG_DEBUG"):
47 os.write(2, msg.encode('utf-8'))
48 # Store traceback in data_dir or TMPDIR
49 prefix = "avocado-traceback-"
50 prefix += time.strftime("%F_%T") + "-"
51 tmp, name = tempfile.mkstemp(".log", prefix, get_crash_dir())
52 os.write(tmp, msg.encode('utf-8'))
53 os.close(tmp)
54 # Print friendly message in console-like output
55 msg = ("Avocado crashed unexpectedly: %s\nYou can find details in %s\n"
56 % (exc_info[1], name))
57 os.write(2, msg.encode('utf-8'))
58 # This exit code is replicated from avocado/core/exit_codes.py and not
59 # imported because we are dealing with import failures
60 sys.exit(-1)
61
62
63 def main():
64 sys.excepthook = handle_exception
65 from avocado.core.app import AvocadoApp # pylint: disable=E0611
66
67 # Override tmp in case it's not set in env
68 for attr in ("TMP", "TEMP", "TMPDIR"):
69 if attr in os.environ:
70 break
71 else: # TMP not set by user, use /var/tmp if exists
72 # TMP not set by user in environment. Try to use /var/tmp to avoid
73 # possible problems with "/tmp" being mounted as TMPFS without the
74 # support for O_DIRECT
75 if os.path.exists("/var/tmp"):
76 os.environ["TMP"] = "/var/tmp"
77 app = AvocadoApp()
78 return app.run()
79
80
81 if __name__ == '__main__':
82 sys.exit(main())
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/avocado/core/main.py b/avocado/core/main.py
--- a/avocado/core/main.py
+++ b/avocado/core/main.py
@@ -19,6 +19,7 @@
import traceback
try:
+ from avocado.core import exit_codes
from avocado.core.settings import settings
except ImportError:
sys.stderr.write("Unable to import Avocado libraries, please verify "
@@ -51,13 +52,16 @@
tmp, name = tempfile.mkstemp(".log", prefix, get_crash_dir())
os.write(tmp, msg.encode('utf-8'))
os.close(tmp)
- # Print friendly message in console-like output
- msg = ("Avocado crashed unexpectedly: %s\nYou can find details in %s\n"
- % (exc_info[1], name))
+ if exc_info[0] is KeyboardInterrupt:
+ msg = "%s\nYou can find details in %s\n" % (exc_info[0].__doc__, name)
+ exit_code = exit_codes.AVOCADO_JOB_INTERRUPTED
+ else:
+ # Print friendly message in console-like output
+ msg = ("Avocado crashed unexpectedly: %s\nYou can find details in %s\n"
+ % (exc_info[1], name))
+ exit_code = exit_codes.AVOCADO_GENERIC_CRASH
os.write(2, msg.encode('utf-8'))
- # This exit code is replicated from avocado/core/exit_codes.py and not
- # imported because we are dealing with import failures
- sys.exit(-1)
+ sys.exit(exit_code)
def main():
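In effect the patch keeps writing the traceback file, but a plain Ctrl+C is no longer reported as a crash. A minimal sketch of the new dispatch, with illustrative numeric values standing in for the constants defined in `avocado/core/exit_codes.py`:

```python
# illustrative values; the real constants are defined in avocado/core/exit_codes.py
AVOCADO_JOB_INTERRUPTED = 8
AVOCADO_GENERIC_CRASH = -1

def pick_exit_code(exc_type):
    # SIGINT surfaces as KeyboardInterrupt and is reported as an interrupted job;
    # any other unhandled exception is still treated as a generic crash
    if exc_type is KeyboardInterrupt:
        return AVOCADO_JOB_INTERRUPTED
    return AVOCADO_GENERIC_CRASH

assert pick_exit_code(KeyboardInterrupt) == AVOCADO_JOB_INTERRUPTED
assert pick_exit_code(RuntimeError) == AVOCADO_GENERIC_CRASH
```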
| {"golden_diff": "diff --git a/avocado/core/main.py b/avocado/core/main.py\n--- a/avocado/core/main.py\n+++ b/avocado/core/main.py\n@@ -19,6 +19,7 @@\n import traceback\n \n try:\n+ from avocado.core import exit_codes\n from avocado.core.settings import settings\n except ImportError:\n sys.stderr.write(\"Unable to import Avocado libraries, please verify \"\n@@ -51,13 +52,16 @@\n tmp, name = tempfile.mkstemp(\".log\", prefix, get_crash_dir())\n os.write(tmp, msg.encode('utf-8'))\n os.close(tmp)\n- # Print friendly message in console-like output\n- msg = (\"Avocado crashed unexpectedly: %s\\nYou can find details in %s\\n\"\n- % (exc_info[1], name))\n+ if exc_info[0] is KeyboardInterrupt:\n+ msg = \"%s\\nYou can find details in %s\\n\" % (exc_info[0].__doc__, name)\n+ exit_code = exit_codes.AVOCADO_JOB_INTERRUPTED\n+ else:\n+ # Print friendly message in console-like output\n+ msg = (\"Avocado crashed unexpectedly: %s\\nYou can find details in %s\\n\"\n+ % (exc_info[1], name))\n+ exit_code = exit_codes.AVOCADO_GENERIC_CRASH\n os.write(2, msg.encode('utf-8'))\n- # This exit code is replicated from avocado/core/exit_codes.py and not\n- # imported because we are dealing with import failures\n- sys.exit(-1)\n+ sys.exit(exit_code)\n \n \n def main():\n", "issue": "Avocado crashed unexpectedly with the SIGINT\nWhen the SIGINT is sent to the avocado in the early stages the avocado will crash.\r\nThis is happening on both runner legacy and nrunner. \r\n\r\n```\r\navocado run /bin/true\r\nJOB ID : ee66540de61211c164d9d9cb5b0e9aaf65dca8a2\r\nJOB LOG : /home/jarichte/avocado/job-results/job-2021-05-25T16.36-ee66540/job.log\r\n^CAvocado crashed unexpectedly:\r\nYou can find details in /var/lib/avocado/data/crashes/avocado-traceback-2021-05-25_16:36:38-_m3ikjhl.log\r\n```\r\n\r\n```\r\navocado run --test-runner=nrunner /bin/true\r\nJOB ID : da09a60ab32ff647c79d919781f82db3543e107f\r\nJOB LOG : /home/jarichte/avocado/job-results/job-2021-05-25T15.09-da09a60/job.log\r\n^CAvocado crashed unexpectedly:\r\nYou can find details in /var/lib/avocado/data/crashes/avocado-traceback-2021-05-25_15:09:37-my68_dsy.log\r\n```\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; specifically version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: RedHat 2013-2014\n# Author: Lucas Meneghel Rodrigues <[email protected]>\n\n\nimport os\nimport sys\nimport tempfile\nimport time\nimport traceback\n\ntry:\n from avocado.core.settings import settings\nexcept ImportError:\n sys.stderr.write(\"Unable to import Avocado libraries, please verify \"\n \"your installation, and if necessary reinstall it.\\n\")\n # This exit code is replicated from avocado/core/exit_codes.py and not\n # imported because we are dealing with import failures\n sys.exit(-1)\n\n\ndef get_crash_dir():\n config = settings.as_dict()\n crash_dir_path = os.path.join(config.get('datadir.paths.data_dir'),\n \"crashes\")\n try:\n os.makedirs(crash_dir_path)\n except OSError:\n pass\n return crash_dir_path\n\n\ndef handle_exception(*exc_info):\n # Print traceback if AVOCADO_LOG_DEBUG environment variable is set\n msg = \"Avocado crashed:\\n\" + \"\".join(traceback.format_exception(*exc_info))\n msg += 
\"\\n\"\n if os.environ.get(\"AVOCADO_LOG_DEBUG\"):\n os.write(2, msg.encode('utf-8'))\n # Store traceback in data_dir or TMPDIR\n prefix = \"avocado-traceback-\"\n prefix += time.strftime(\"%F_%T\") + \"-\"\n tmp, name = tempfile.mkstemp(\".log\", prefix, get_crash_dir())\n os.write(tmp, msg.encode('utf-8'))\n os.close(tmp)\n # Print friendly message in console-like output\n msg = (\"Avocado crashed unexpectedly: %s\\nYou can find details in %s\\n\"\n % (exc_info[1], name))\n os.write(2, msg.encode('utf-8'))\n # This exit code is replicated from avocado/core/exit_codes.py and not\n # imported because we are dealing with import failures\n sys.exit(-1)\n\n\ndef main():\n sys.excepthook = handle_exception\n from avocado.core.app import AvocadoApp # pylint: disable=E0611\n\n # Override tmp in case it's not set in env\n for attr in (\"TMP\", \"TEMP\", \"TMPDIR\"):\n if attr in os.environ:\n break\n else: # TMP not set by user, use /var/tmp if exists\n # TMP not set by user in environment. Try to use /var/tmp to avoid\n # possible problems with \"/tmp\" being mounted as TMPFS without the\n # support for O_DIRECT\n if os.path.exists(\"/var/tmp\"):\n os.environ[\"TMP\"] = \"/var/tmp\"\n app = AvocadoApp()\n return app.run()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "avocado/core/main.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; specifically version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: RedHat 2013-2014\n# Author: Lucas Meneghel Rodrigues <[email protected]>\n\n\nimport os\nimport sys\nimport tempfile\nimport time\nimport traceback\n\ntry:\n from avocado.core import exit_codes\n from avocado.core.settings import settings\nexcept ImportError:\n sys.stderr.write(\"Unable to import Avocado libraries, please verify \"\n \"your installation, and if necessary reinstall it.\\n\")\n # This exit code is replicated from avocado/core/exit_codes.py and not\n # imported because we are dealing with import failures\n sys.exit(-1)\n\n\ndef get_crash_dir():\n config = settings.as_dict()\n crash_dir_path = os.path.join(config.get('datadir.paths.data_dir'),\n \"crashes\")\n try:\n os.makedirs(crash_dir_path)\n except OSError:\n pass\n return crash_dir_path\n\n\ndef handle_exception(*exc_info):\n # Print traceback if AVOCADO_LOG_DEBUG environment variable is set\n msg = \"Avocado crashed:\\n\" + \"\".join(traceback.format_exception(*exc_info))\n msg += \"\\n\"\n if os.environ.get(\"AVOCADO_LOG_DEBUG\"):\n os.write(2, msg.encode('utf-8'))\n # Store traceback in data_dir or TMPDIR\n prefix = \"avocado-traceback-\"\n prefix += time.strftime(\"%F_%T\") + \"-\"\n tmp, name = tempfile.mkstemp(\".log\", prefix, get_crash_dir())\n os.write(tmp, msg.encode('utf-8'))\n os.close(tmp)\n if exc_info[0] is KeyboardInterrupt:\n msg = \"%s\\nYou can find details in %s\\n\" % (exc_info[0].__doc__, name)\n exit_code = exit_codes.AVOCADO_JOB_INTERRUPTED\n else:\n # Print friendly message in console-like output\n msg = (\"Avocado crashed unexpectedly: %s\\nYou can find details in %s\\n\"\n % (exc_info[1], name))\n exit_code = exit_codes.AVOCADO_GENERIC_CRASH\n os.write(2, msg.encode('utf-8'))\n sys.exit(exit_code)\n\n\ndef 
main():\n sys.excepthook = handle_exception\n from avocado.core.app import AvocadoApp # pylint: disable=E0611\n\n # Override tmp in case it's not set in env\n for attr in (\"TMP\", \"TEMP\", \"TMPDIR\"):\n if attr in os.environ:\n break\n else: # TMP not set by user, use /var/tmp if exists\n # TMP not set by user in environment. Try to use /var/tmp to avoid\n # possible problems with \"/tmp\" being mounted as TMPFS without the\n # support for O_DIRECT\n if os.path.exists(\"/var/tmp\"):\n os.environ[\"TMP\"] = \"/var/tmp\"\n app = AvocadoApp()\n return app.run()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "avocado/core/main.py"}]} | 1,450 | 360 |
gh_patches_debug_48680 | rasdani/github-patches | git_diff | ethereum__web3.py-670 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consider adding Chain Id to library
* Version: 4.0.0-b
* Python: 3.6.3
* OS: linux
### What was wrong?
No clear way to access known chain ids.
### How can it be fixed?
Proposed syntax
```
>>> from web3 import Chains
>>> Chains.Ropsten.id
3
```
I ran into issues here: https://web3py.readthedocs.io/en/latest/web3.eth.account.html#sign-a-contract-transaction as `buildTransaction()` requires a `chainId`. I didn't even know the chains had ids.
```
>>> unicorn_txn = unicorns.functions.transfer(
... '0xfB6916095ca1df60bB79Ce92cE3Ea74c37c5d359',
... 1,
... ).buildTransaction({
... 'chainId': 1,
... 'gas': 70000,
... 'gasPrice': w3.toWei('1', 'gwei'),
... 'nonce': nonce,
... })
```
### Maybe this will help others
According to this answer https://ethereum.stackexchange.com/a/17101/7187 the chain ids are as follows:
0: Olympic, Ethereum public pre-release testnet
1: Frontier, Homestead, Metropolis, the Ethereum public main network
1: Classic, the (un)forked public Ethereum Classic main network, chain ID 61
1: Expanse, an alternative Ethereum implementation, chain ID 2
2: Morden, the public Ethereum testnet, now Ethereum Classic testnet
3: Ropsten, the public cross-client Ethereum testnet
4: Rinkeby, the public Geth PoA testnet
42: Kovan, the public Parity PoA testnet
77: Sokol, the public POA Network testnet
99: Core, the public POA Network main network
7762959: Musicoin, the music blockchain
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/net.py`
Content:
```
1 from web3.module import (
2 Module,
3 )
4
5
6 class Net(Module):
7 @property
8 def listening(self):
9 return self.web3.manager.request_blocking("net_listening", [])
10
11 @property
12 def peerCount(self):
13 return self.web3.manager.request_blocking("net_peerCount", [])
14
15 @property
16 def version(self):
17 return self.web3.manager.request_blocking("net_version", [])
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/web3/net.py b/web3/net.py
--- a/web3/net.py
+++ b/web3/net.py
@@ -12,6 +12,10 @@
def peerCount(self):
return self.web3.manager.request_blocking("net_peerCount", [])
+ @property
+ def chainId(self):
+ return self.version
+
@property
def version(self):
return self.web3.manager.request_blocking("net_version", [])
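Assuming a JSON-RPC node is reachable, the new property reads like any other `net_*` value; a rough usage sketch (the endpoint is a placeholder):

```python
from web3 import Web3, HTTPProvider

# placeholder endpoint; any reachable JSON-RPC node works
w3 = Web3(HTTPProvider("http://127.0.0.1:8545"))

# with the patch, chainId is simply an alias for the net_version RPC result
assert w3.net.chainId == w3.net.version
chain_id = int(w3.net.chainId)  # e.g. 3 on Ropsten, 4 on Rinkeby
print(chain_id)
```

The resulting integer can then be passed as the `chainId` entry of `buildTransaction()` instead of hard-coding `1`.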
| {"golden_diff": "diff --git a/web3/net.py b/web3/net.py\n--- a/web3/net.py\n+++ b/web3/net.py\n@@ -12,6 +12,10 @@\n def peerCount(self):\n return self.web3.manager.request_blocking(\"net_peerCount\", [])\n \n+ @property\n+ def chainId(self):\n+ return self.version\n+\n @property\n def version(self):\n return self.web3.manager.request_blocking(\"net_version\", [])\n", "issue": "Consider adding Chain Id to library\n* Version: 4.0.0-b\r\n* Python: 3.6.3\r\n* OS: linux\r\n\r\n### What was wrong?\r\n\r\nNo clear way to access known chain ids.\r\n\r\n### How can it be fixed?\r\n\r\nProposed syntax\r\n\r\n```\r\n>>> from web3 import Chains\r\n>>> Chains.Ropsten.id\r\n3\r\n```\r\n\r\nI ran into issues here: https://web3py.readthedocs.io/en/latest/web3.eth.account.html#sign-a-contract-transaction as `buildTransaction()` requires a `chainId`. I didn't even know the chains had ids.\r\n\r\n```\r\n>>> unicorn_txn = unicorns.functions.transfer(\r\n... '0xfB6916095ca1df60bB79Ce92cE3Ea74c37c5d359',\r\n... 1,\r\n... ).buildTransaction({\r\n... 'chainId': 1,\r\n... 'gas': 70000,\r\n... 'gasPrice': w3.toWei('1', 'gwei'),\r\n... 'nonce': nonce,\r\n... })\r\n```\r\n\r\n### Maybe this will help others\r\n\r\nAccording to this answer https://ethereum.stackexchange.com/a/17101/7187 the chain ids are as follows:\r\n\r\n0: Olympic, Ethereum public pre-release testnet\r\n1: Frontier, Homestead, Metropolis, the Ethereum public main network\r\n1: Classic, the (un)forked public Ethereum Classic main network, chain ID 61\r\n1: Expanse, an alternative Ethereum implementation, chain ID 2\r\n2: Morden, the public Ethereum testnet, now Ethereum Classic testnet\r\n3: Ropsten, the public cross-client Ethereum testnet\r\n4: Rinkeby, the public Geth PoA testnet\r\n42: Kovan, the public Parity PoA testnet\r\n77: Sokol, the public POA Network testnet\r\n99: Core, the public POA Network main network\r\n7762959: Musicoin, the music blockchain\r\n\nConsider adding Chain Id to library\n* Version: 4.0.0-b\r\n* Python: 3.6.3\r\n* OS: linux\r\n\r\n### What was wrong?\r\n\r\nNo clear way to access known chain ids.\r\n\r\n### How can it be fixed?\r\n\r\nProposed syntax\r\n\r\n```\r\n>>> from web3 import Chains\r\n>>> Chains.Ropsten.id\r\n3\r\n```\r\n\r\nI ran into issues here: https://web3py.readthedocs.io/en/latest/web3.eth.account.html#sign-a-contract-transaction as `buildTransaction()` requires a `chainId`. I didn't even know the chains had ids.\r\n\r\n```\r\n>>> unicorn_txn = unicorns.functions.transfer(\r\n... '0xfB6916095ca1df60bB79Ce92cE3Ea74c37c5d359',\r\n... 1,\r\n... ).buildTransaction({\r\n... 'chainId': 1,\r\n... 'gas': 70000,\r\n... 'gasPrice': w3.toWei('1', 'gwei'),\r\n... 'nonce': nonce,\r\n... 
})\r\n```\r\n\r\n### Maybe this will help others\r\n\r\nAccording to this answer https://ethereum.stackexchange.com/a/17101/7187 the chain ids are as follows:\r\n\r\n0: Olympic, Ethereum public pre-release testnet\r\n1: Frontier, Homestead, Metropolis, the Ethereum public main network\r\n1: Classic, the (un)forked public Ethereum Classic main network, chain ID 61\r\n1: Expanse, an alternative Ethereum implementation, chain ID 2\r\n2: Morden, the public Ethereum testnet, now Ethereum Classic testnet\r\n3: Ropsten, the public cross-client Ethereum testnet\r\n4: Rinkeby, the public Geth PoA testnet\r\n42: Kovan, the public Parity PoA testnet\r\n77: Sokol, the public POA Network testnet\r\n99: Core, the public POA Network main network\r\n7762959: Musicoin, the music blockchain\r\n\n", "before_files": [{"content": "from web3.module import (\n Module,\n)\n\n\nclass Net(Module):\n @property\n def listening(self):\n return self.web3.manager.request_blocking(\"net_listening\", [])\n\n @property\n def peerCount(self):\n return self.web3.manager.request_blocking(\"net_peerCount\", [])\n\n @property\n def version(self):\n return self.web3.manager.request_blocking(\"net_version\", [])\n", "path": "web3/net.py"}], "after_files": [{"content": "from web3.module import (\n Module,\n)\n\n\nclass Net(Module):\n @property\n def listening(self):\n return self.web3.manager.request_blocking(\"net_listening\", [])\n\n @property\n def peerCount(self):\n return self.web3.manager.request_blocking(\"net_peerCount\", [])\n\n @property\n def chainId(self):\n return self.version\n\n @property\n def version(self):\n return self.web3.manager.request_blocking(\"net_version\", [])\n", "path": "web3/net.py"}]} | 1,265 | 103 |
gh_patches_debug_36617 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-406 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use numpy.load and save instead of pickle
As @StanczakDominik put it:
> By the way, apparently pickle is unsafe due to allowing arbitrary code execution, and we're now including those in Langmuir samples. @jasperbeckers do you think we could transition to numpy.save and numpy.load .npz files? We're just storing two arrays in each of those anyway, right?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/examples/plot_langmuir_analysis.py`
Content:
```
1 # coding: utf-8
2 """
3 Langmuir probe data analysis
4 ============================
5
6 Let's analyze a few Langmuir probe characteristics using the
7 `diagnostics.langmuir` subpackage. First we need to import the module and some
8 basics.
9 """
10
11 from plasmapy.diagnostics.langmuir import Characteristic, swept_probe_analysis
12 import astropy.units as u
13 import numpy as np
14 import pickle
15 import os
16
17 ######################################################
18 # The first characteristic we analyze is a simple single-probe measurement in
19 # a low (ion) temperature, low density plasma with a cylindrical probe. This
20 # allows us to utilize OML theory implemented in `swept_probe_analysis`.
21 # The data has been preprocessed with some smoothing, which allows us to obtain
22 # a Electron Energy Distribution Function (EEDF) as well.
23
24 # Load the bias and current values stored in the .p pickle file.
25 path = os.path.join("langmuir_samples", "Beckers2017.p")
26 bias, current = pickle.load(open(path, 'rb'))
27
28 # Create the Characteristic object, taking into account the correct units
29 characteristic = Characteristic(np.array(bias) * u.V,
30 np.array(current)*1e3 * u.mA)
31
32 # Calculate the cylindrical probe surface area
33 probe_length = 1.145 * u.mm
34 probe_diameter = 1.57 * u.mm
35 probe_area = (probe_length * np.pi * probe_diameter +
36 np.pi * 0.25 * probe_diameter**2)
37
38 ######################################################
39 # Now we can actually perform the analysis. Since the plasma is in Helium an
40 # ion mass number of 4 is entered. The results are visualized and the obtained
41 # EEDF is also shown.
42 print(swept_probe_analysis(characteristic,
43 probe_area, 4 * u.u,
44 visualize=True,
45 plot_EEDF=True))
46
47 ######################################################
48 # The cyan and yellow lines indicate the fitted electron and ion currents,
49 # respectively. The green line is the sum of these and agrees nicely with the
50 # data. This indicates a succesfull analysis.
51
52 ######################################################
53 # The next sample probe data is provided by David Pace. is also obtained from a low relatively ion
54 # temperature and density plasma, in Argon.
55
56 # Load the data from a file and create the Characteristic object
57 path = os.path.join("langmuir_samples", "Pace2015.p")
58 bias, current = pickle.load(open(path, 'rb'))
59 characteristic = Characteristic(np.array(bias) * u.V,
60 np.array(current) * 1e3 * u.mA)
61
62 ######################################################
63 # Initially the electrons are assumed to be Maxwellian. To check this the fit
64 # of the electron growth region will be plotted.
65 swept_probe_analysis(characteristic,
66 0.738 * u.cm**2,
67 40 * u.u,
68 bimaxwellian=False,
69 plot_electron_fit=True)
70
71 ######################################################
72 # It can be seen that this plasma is slightly bi-Maxwellian, as there are two
73 # distinct slopes in the exponential section. The analysis is now performed
74 # with bimaxwellian set to True, which yields improved results.
75 print(swept_probe_analysis(characteristic,
76 0.738 * u.cm**2,
77 40 * u.u,
78 bimaxwellian=True,
79 visualize=True,
80 plot_electron_fit=True))
81
82 ######################################################
83 # The probe current resolution of the raw data is relatively poor, but the
84 # analysis still performs well in the ion current region. The bi-Maxwellian
85 # properties are not significant but do make a difference. Check this analysis
86 # without setting `bimaxwellian` to True!
87 # This is reflected in the results, which indicate that the temperatures of
88 # the cold and hot electron population are indeed different, but relatively
89 # close.
90
91 ######################################################
92 # This Helium plasma is fully bi-Maxwellian.
93
94 # Import probe data and calculate probe surface area.
95 path = os.path.join("langmuir_samples", "Beckers2017b.p")
96 bias, current = pickle.load(open(path, 'rb'))
97 characteristic = Characteristic(np.array(bias) * u.V,
98 np.array(current) * 1e3 * u.mA)
99 probe_length = 1.145 * u.mm
100 probe_diameter = 1.57 * u.mm
101 probe_area = (probe_length * np.pi * probe_diameter +
102 np.pi * 0.25 * probe_diameter**2)
103
104 ######################################################
105 # `plot_electron_fit` is set to True to check the bi-Maxwellian properties.
106 # The fit converges nicely to the two slopes of the electron growth region.
107 print(swept_probe_analysis(characteristic,
108 probe_area,
109 4 * u.u,
110 bimaxwellian=True,
111 plot_electron_fit=True,
112 visualize=True))
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plasmapy/examples/plot_langmuir_analysis.py b/plasmapy/examples/plot_langmuir_analysis.py
--- a/plasmapy/examples/plot_langmuir_analysis.py
+++ b/plasmapy/examples/plot_langmuir_analysis.py
@@ -11,7 +11,6 @@
from plasmapy.diagnostics.langmuir import Characteristic, swept_probe_analysis
import astropy.units as u
import numpy as np
-import pickle
import os
######################################################
@@ -22,8 +21,8 @@
# a Electron Energy Distribution Function (EEDF) as well.
# Load the bias and current values stored in the .p pickle file.
-path = os.path.join("langmuir_samples", "Beckers2017.p")
-bias, current = pickle.load(open(path, 'rb'))
+path = os.path.join("langmuir_samples", "Beckers2017.npy")
+bias, current = np.load(path)
# Create the Characteristic object, taking into account the correct units
characteristic = Characteristic(np.array(bias) * u.V,
@@ -50,12 +49,12 @@
# data. This indicates a succesfull analysis.
######################################################
-# The next sample probe data is provided by David Pace. is also obtained from a low relatively ion
-# temperature and density plasma, in Argon.
+# The next sample probe data is provided by David Pace. It is also obtained
+# from a low relatively ion temperature and density plasma, in Argon.
# Load the data from a file and create the Characteristic object
-path = os.path.join("langmuir_samples", "Pace2015.p")
-bias, current = pickle.load(open(path, 'rb'))
+path = os.path.join("langmuir_samples", "Pace2015.npy")
+bias, current = np.load(path)
characteristic = Characteristic(np.array(bias) * u.V,
np.array(current) * 1e3 * u.mA)
@@ -92,8 +91,8 @@
# This Helium plasma is fully bi-Maxwellian.
# Import probe data and calculate probe surface area.
-path = os.path.join("langmuir_samples", "Beckers2017b.p")
-bias, current = pickle.load(open(path, 'rb'))
+path = os.path.join("langmuir_samples", "Beckers2017b.npy")
+bias, current = np.load(path)
characteristic = Characteristic(np.array(bias) * u.V,
np.array(current) * 1e3 * u.mA)
probe_length = 1.145 * u.mm
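Converting the existing sample files is a one-off step outside this diff; assuming each pickle holds exactly the two arrays, a migration sketch could look like this:

```python
import pickle
import numpy as np

# read the old pickle sample (two equal-length arrays: bias and current)
with open("langmuir_samples/Beckers2017.p", "rb") as f:
    bias, current = pickle.load(f)

# store them as a single 2-row array so the patched example can unpack them
np.save("langmuir_samples/Beckers2017.npy", np.array([bias, current]))

# mirrors the loading code used in the patched example
bias, current = np.load("langmuir_samples/Beckers2017.npy")
```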
| {"golden_diff": "diff --git a/plasmapy/examples/plot_langmuir_analysis.py b/plasmapy/examples/plot_langmuir_analysis.py\n--- a/plasmapy/examples/plot_langmuir_analysis.py\n+++ b/plasmapy/examples/plot_langmuir_analysis.py\n@@ -11,7 +11,6 @@\n from plasmapy.diagnostics.langmuir import Characteristic, swept_probe_analysis\n import astropy.units as u\n import numpy as np\n-import pickle\n import os\n \n ######################################################\n@@ -22,8 +21,8 @@\n # a Electron Energy Distribution Function (EEDF) as well.\n \n # Load the bias and current values stored in the .p pickle file.\n-path = os.path.join(\"langmuir_samples\", \"Beckers2017.p\")\n-bias, current = pickle.load(open(path, 'rb'))\n+path = os.path.join(\"langmuir_samples\", \"Beckers2017.npy\")\n+bias, current = np.load(path)\n \n # Create the Characteristic object, taking into account the correct units\n characteristic = Characteristic(np.array(bias) * u.V,\n@@ -50,12 +49,12 @@\n # data. This indicates a succesfull analysis.\n \n ######################################################\n-# The next sample probe data is provided by David Pace. is also obtained from a low relatively ion\n-# temperature and density plasma, in Argon.\n+# The next sample probe data is provided by David Pace. It is also obtained\n+# from a low relatively ion temperature and density plasma, in Argon.\n \n # Load the data from a file and create the Characteristic object\n-path = os.path.join(\"langmuir_samples\", \"Pace2015.p\")\n-bias, current = pickle.load(open(path, 'rb'))\n+path = os.path.join(\"langmuir_samples\", \"Pace2015.npy\")\n+bias, current = np.load(path)\n characteristic = Characteristic(np.array(bias) * u.V,\n np.array(current) * 1e3 * u.mA)\n \n@@ -92,8 +91,8 @@\n # This Helium plasma is fully bi-Maxwellian.\n \n # Import probe data and calculate probe surface area.\n-path = os.path.join(\"langmuir_samples\", \"Beckers2017b.p\")\n-bias, current = pickle.load(open(path, 'rb'))\n+path = os.path.join(\"langmuir_samples\", \"Beckers2017b.npy\")\n+bias, current = np.load(path)\n characteristic = Characteristic(np.array(bias) * u.V,\n np.array(current) * 1e3 * u.mA)\n probe_length = 1.145 * u.mm\n", "issue": "Use numpy.load and save instead of pickle\nAs @StanczakDominik put it:\r\n\r\n> By the way, apparently pickle is unsafe due to allowing arbitrary code execution, and we're now including those in Langmuir samples. @jasperbeckers do you think we could transition to numpy.save and numpy.load .npz files? We're just storing two arrays in each of those anyway, right?\n", "before_files": [{"content": "# coding: utf-8\n\"\"\"\nLangmuir probe data analysis\n============================\n\nLet's analyze a few Langmuir probe characteristics using the\n`diagnostics.langmuir` subpackage. First we need to import the module and some\nbasics.\n\"\"\"\n\nfrom plasmapy.diagnostics.langmuir import Characteristic, swept_probe_analysis\nimport astropy.units as u\nimport numpy as np\nimport pickle\nimport os\n\n######################################################\n# The first characteristic we analyze is a simple single-probe measurement in\n# a low (ion) temperature, low density plasma with a cylindrical probe. 
This\n# allows us to utilize OML theory implemented in `swept_probe_analysis`.\n# The data has been preprocessed with some smoothing, which allows us to obtain\n# a Electron Energy Distribution Function (EEDF) as well.\n\n# Load the bias and current values stored in the .p pickle file.\npath = os.path.join(\"langmuir_samples\", \"Beckers2017.p\")\nbias, current = pickle.load(open(path, 'rb'))\n\n# Create the Characteristic object, taking into account the correct units\ncharacteristic = Characteristic(np.array(bias) * u.V,\n np.array(current)*1e3 * u.mA)\n\n# Calculate the cylindrical probe surface area\nprobe_length = 1.145 * u.mm\nprobe_diameter = 1.57 * u.mm\nprobe_area = (probe_length * np.pi * probe_diameter +\n np.pi * 0.25 * probe_diameter**2)\n\n######################################################\n# Now we can actually perform the analysis. Since the plasma is in Helium an\n# ion mass number of 4 is entered. The results are visualized and the obtained\n# EEDF is also shown.\nprint(swept_probe_analysis(characteristic,\n probe_area, 4 * u.u,\n visualize=True,\n plot_EEDF=True))\n\n######################################################\n# The cyan and yellow lines indicate the fitted electron and ion currents,\n# respectively. The green line is the sum of these and agrees nicely with the\n# data. This indicates a succesfull analysis.\n\n######################################################\n# The next sample probe data is provided by David Pace. is also obtained from a low relatively ion\n# temperature and density plasma, in Argon.\n\n# Load the data from a file and create the Characteristic object\npath = os.path.join(\"langmuir_samples\", \"Pace2015.p\")\nbias, current = pickle.load(open(path, 'rb'))\ncharacteristic = Characteristic(np.array(bias) * u.V,\n np.array(current) * 1e3 * u.mA)\n\n######################################################\n# Initially the electrons are assumed to be Maxwellian. To check this the fit\n# of the electron growth region will be plotted.\nswept_probe_analysis(characteristic,\n 0.738 * u.cm**2,\n 40 * u.u,\n bimaxwellian=False,\n plot_electron_fit=True)\n\n######################################################\n# It can be seen that this plasma is slightly bi-Maxwellian, as there are two\n# distinct slopes in the exponential section. The analysis is now performed\n# with bimaxwellian set to True, which yields improved results.\nprint(swept_probe_analysis(characteristic,\n 0.738 * u.cm**2,\n 40 * u.u,\n bimaxwellian=True,\n visualize=True,\n plot_electron_fit=True))\n\n######################################################\n# The probe current resolution of the raw data is relatively poor, but the\n# analysis still performs well in the ion current region. The bi-Maxwellian\n# properties are not significant but do make a difference. 
Check this analysis\n# without setting `bimaxwellian` to True!\n# This is reflected in the results, which indicate that the temperatures of\n# the cold and hot electron population are indeed different, but relatively\n# close.\n\n######################################################\n# This Helium plasma is fully bi-Maxwellian.\n\n# Import probe data and calculate probe surface area.\npath = os.path.join(\"langmuir_samples\", \"Beckers2017b.p\")\nbias, current = pickle.load(open(path, 'rb'))\ncharacteristic = Characteristic(np.array(bias) * u.V,\n np.array(current) * 1e3 * u.mA)\nprobe_length = 1.145 * u.mm\nprobe_diameter = 1.57 * u.mm\nprobe_area = (probe_length * np.pi * probe_diameter +\n np.pi * 0.25 * probe_diameter**2)\n\n######################################################\n# `plot_electron_fit` is set to True to check the bi-Maxwellian properties.\n# The fit converges nicely to the two slopes of the electron growth region.\nprint(swept_probe_analysis(characteristic,\n probe_area,\n 4 * u.u,\n bimaxwellian=True,\n plot_electron_fit=True,\n visualize=True))\n", "path": "plasmapy/examples/plot_langmuir_analysis.py"}], "after_files": [{"content": "# coding: utf-8\n\"\"\"\nLangmuir probe data analysis\n============================\n\nLet's analyze a few Langmuir probe characteristics using the\n`diagnostics.langmuir` subpackage. First we need to import the module and some\nbasics.\n\"\"\"\n\nfrom plasmapy.diagnostics.langmuir import Characteristic, swept_probe_analysis\nimport astropy.units as u\nimport numpy as np\nimport os\n\n######################################################\n# The first characteristic we analyze is a simple single-probe measurement in\n# a low (ion) temperature, low density plasma with a cylindrical probe. This\n# allows us to utilize OML theory implemented in `swept_probe_analysis`.\n# The data has been preprocessed with some smoothing, which allows us to obtain\n# a Electron Energy Distribution Function (EEDF) as well.\n\n# Load the bias and current values stored in the .p pickle file.\npath = os.path.join(\"langmuir_samples\", \"Beckers2017.npy\")\nbias, current = np.load(path)\n\n# Create the Characteristic object, taking into account the correct units\ncharacteristic = Characteristic(np.array(bias) * u.V,\n np.array(current)*1e3 * u.mA)\n\n# Calculate the cylindrical probe surface area\nprobe_length = 1.145 * u.mm\nprobe_diameter = 1.57 * u.mm\nprobe_area = (probe_length * np.pi * probe_diameter +\n np.pi * 0.25 * probe_diameter**2)\n\n######################################################\n# Now we can actually perform the analysis. Since the plasma is in Helium an\n# ion mass number of 4 is entered. The results are visualized and the obtained\n# EEDF is also shown.\nprint(swept_probe_analysis(characteristic,\n probe_area, 4 * u.u,\n visualize=True,\n plot_EEDF=True))\n\n######################################################\n# The cyan and yellow lines indicate the fitted electron and ion currents,\n# respectively. The green line is the sum of these and agrees nicely with the\n# data. This indicates a succesfull analysis.\n\n######################################################\n# The next sample probe data is provided by David Pace. 
It is also obtained\n# from a low relatively ion temperature and density plasma, in Argon.\n\n# Load the data from a file and create the Characteristic object\npath = os.path.join(\"langmuir_samples\", \"Pace2015.npy\")\nbias, current = np.load(path)\ncharacteristic = Characteristic(np.array(bias) * u.V,\n np.array(current) * 1e3 * u.mA)\n\n######################################################\n# Initially the electrons are assumed to be Maxwellian. To check this the fit\n# of the electron growth region will be plotted.\nswept_probe_analysis(characteristic,\n 0.738 * u.cm**2,\n 40 * u.u,\n bimaxwellian=False,\n plot_electron_fit=True)\n\n######################################################\n# It can be seen that this plasma is slightly bi-Maxwellian, as there are two\n# distinct slopes in the exponential section. The analysis is now performed\n# with bimaxwellian set to True, which yields improved results.\nprint(swept_probe_analysis(characteristic,\n 0.738 * u.cm**2,\n 40 * u.u,\n bimaxwellian=True,\n visualize=True,\n plot_electron_fit=True))\n\n######################################################\n# The probe current resolution of the raw data is relatively poor, but the\n# analysis still performs well in the ion current region. The bi-Maxwellian\n# properties are not significant but do make a difference. Check this analysis\n# without setting `bimaxwellian` to True!\n# This is reflected in the results, which indicate that the temperatures of\n# the cold and hot electron population are indeed different, but relatively\n# close.\n\n######################################################\n# This Helium plasma is fully bi-Maxwellian.\n\n# Import probe data and calculate probe surface area.\npath = os.path.join(\"langmuir_samples\", \"Beckers2017b.npy\")\nbias, current = np.load(path)\ncharacteristic = Characteristic(np.array(bias) * u.V,\n np.array(current) * 1e3 * u.mA)\nprobe_length = 1.145 * u.mm\nprobe_diameter = 1.57 * u.mm\nprobe_area = (probe_length * np.pi * probe_diameter +\n np.pi * 0.25 * probe_diameter**2)\n\n######################################################\n# `plot_electron_fit` is set to True to check the bi-Maxwellian properties.\n# The fit converges nicely to the two slopes of the electron growth region.\nprint(swept_probe_analysis(characteristic,\n probe_area,\n 4 * u.u,\n bimaxwellian=True,\n plot_electron_fit=True,\n visualize=True))\n", "path": "plasmapy/examples/plot_langmuir_analysis.py"}]} | 1,633 | 588 |
gh_patches_debug_20904 | rasdani/github-patches | git_diff | cupy__cupy-1911 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[WIP] Fix assigning from complex to float (only test)
When a user assigns a complex value to a float array, it causes an error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/core/_ufuncs.py`
Content:
```
1 from cupy.core._kernel import create_ufunc
2
3 elementwise_copy = create_ufunc(
4 'cupy_copy',
5 ('?->?', 'b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L',
6 'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d', 'F->F', 'D->D'),
7 'out0 = out0_type(in0)', default_casting='unsafe')
8 # complex numbers requires out0 = complex<T>(in0)
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/core/_ufuncs.py b/cupy/core/_ufuncs.py
--- a/cupy/core/_ufuncs.py
+++ b/cupy/core/_ufuncs.py
@@ -1,8 +1,30 @@
from cupy.core._kernel import create_ufunc
+
+_complex_cast_copy = '''
+template<typename T, typename U>
+__device__ void cast_copy(const U& x, T& y) {y = T(x);}
+template<typename T, typename U>
+__device__ void cast_copy(const complex<U>& x, complex<T>& y) {
+ y = complex<T>(x);
+}
+template<typename T, typename U>
+__device__ void cast_copy(const complex<U>& x, T& y) {y = T(x.real());}
+'''
+
+
elementwise_copy = create_ufunc(
'cupy_copy',
('?->?', 'b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L',
'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d', 'F->F', 'D->D'),
- 'out0 = out0_type(in0)', default_casting='unsafe')
+ 'cast_copy(in0, out0)',
+ preamble=_complex_cast_copy, default_casting='unsafe')
+
+
+elementwise_copy_where = create_ufunc(
+ 'cupy_copy_where',
+ ('??->?', 'b?->b', 'B?->B', 'h?->h', 'H?->H', 'i?->i', 'I?->I', 'l?->l',
+ 'L?->L', 'q?->q', 'Q?->Q', 'e?->e', 'f?->f', 'd?->d', 'F?->F', 'D?->D'),
+ 'if (in1) cast_copy(in0, out0)',
+ preamble=_complex_cast_copy, default_casting='unsafe')
# complex numbers requires out0 = complex<T>(in0)
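A minimal sketch of the behaviour the new `cast_copy` overloads are meant to allow, assuming a CUDA device is available:

```python
import cupy

dst = cupy.zeros(3, dtype=cupy.float32)
src = cupy.array([1 + 2j, 3 + 4j, 5 + 6j], dtype=cupy.complex64)

# with the patched elementwise_copy the imaginary part is dropped,
# instead of the copy failing on a complex -> float assignment
dst[...] = src
print(dst)  # expected: [1. 3. 5.]
```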
| {"golden_diff": "diff --git a/cupy/core/_ufuncs.py b/cupy/core/_ufuncs.py\n--- a/cupy/core/_ufuncs.py\n+++ b/cupy/core/_ufuncs.py\n@@ -1,8 +1,30 @@\n from cupy.core._kernel import create_ufunc\n \n+\n+_complex_cast_copy = '''\n+template<typename T, typename U>\n+__device__ void cast_copy(const U& x, T& y) {y = T(x);}\n+template<typename T, typename U>\n+__device__ void cast_copy(const complex<U>& x, complex<T>& y) {\n+ y = complex<T>(x);\n+}\n+template<typename T, typename U>\n+__device__ void cast_copy(const complex<U>& x, T& y) {y = T(x.real());}\n+'''\n+\n+\n elementwise_copy = create_ufunc(\n 'cupy_copy',\n ('?->?', 'b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L',\n 'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d', 'F->F', 'D->D'),\n- 'out0 = out0_type(in0)', default_casting='unsafe')\n+ 'cast_copy(in0, out0)',\n+ preamble=_complex_cast_copy, default_casting='unsafe')\n+\n+\n+elementwise_copy_where = create_ufunc(\n+ 'cupy_copy_where',\n+ ('??->?', 'b?->b', 'B?->B', 'h?->h', 'H?->H', 'i?->i', 'I?->I', 'l?->l',\n+ 'L?->L', 'q?->q', 'Q?->Q', 'e?->e', 'f?->f', 'd?->d', 'F?->F', 'D?->D'),\n+ 'if (in1) cast_copy(in0, out0)',\n+ preamble=_complex_cast_copy, default_casting='unsafe')\n # complex numbers requires out0 = complex<T>(in0)\n", "issue": "[WIP] Fix assigning from complex to float (only test)\nWhen a user assign complex value to float array, it causes an error.\n", "before_files": [{"content": "from cupy.core._kernel import create_ufunc\n\nelementwise_copy = create_ufunc(\n 'cupy_copy',\n ('?->?', 'b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L',\n 'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d', 'F->F', 'D->D'),\n 'out0 = out0_type(in0)', default_casting='unsafe')\n# complex numbers requires out0 = complex<T>(in0)\n", "path": "cupy/core/_ufuncs.py"}], "after_files": [{"content": "from cupy.core._kernel import create_ufunc\n\n\n_complex_cast_copy = '''\ntemplate<typename T, typename U>\n__device__ void cast_copy(const U& x, T& y) {y = T(x);}\ntemplate<typename T, typename U>\n__device__ void cast_copy(const complex<U>& x, complex<T>& y) {\n y = complex<T>(x);\n}\ntemplate<typename T, typename U>\n__device__ void cast_copy(const complex<U>& x, T& y) {y = T(x.real());}\n'''\n\n\nelementwise_copy = create_ufunc(\n 'cupy_copy',\n ('?->?', 'b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L',\n 'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d', 'F->F', 'D->D'),\n 'cast_copy(in0, out0)',\n preamble=_complex_cast_copy, default_casting='unsafe')\n\n\nelementwise_copy_where = create_ufunc(\n 'cupy_copy_where',\n ('??->?', 'b?->b', 'B?->B', 'h?->h', 'H?->H', 'i?->i', 'I?->I', 'l?->l',\n 'L?->L', 'q?->q', 'Q?->Q', 'e?->e', 'f?->f', 'd?->d', 'F?->F', 'D?->D'),\n 'if (in1) cast_copy(in0, out0)',\n preamble=_complex_cast_copy, default_casting='unsafe')\n# complex numbers requires out0 = complex<T>(in0)\n", "path": "cupy/core/_ufuncs.py"}]} | 438 | 480 |
gh_patches_debug_36592 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1399 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable input normalization in SemanticSegmentationData module
## 🚀 Feature
Add the possibility to normalize input images in the SemanticSegmentationData module
### Motivation
Enable effortless normalization, as already implemented by ImageClassificationData: optionally configurable by doing:
```python
dm = SemanticSegmentationData.from_folders(
# ...
args_transforms=dict(mean=mean,std=std)
)
```
### Pitch
Change [/flash/image/segmentation/input_transform.py:43](https://github.com/Lightning-AI/lightning-flash/blob/master/flash/image/segmentation/input_transform.py#L43)
```python
@dataclass
class SemanticSegmentationInputTransform(InputTransform):
image_size: Tuple[int, int] = (128, 128)
def train_per_sample_transform(self) -> Callable:
return ApplyToKeys(
[DataKeys.INPUT, DataKeys.TARGET],
KorniaParallelTransforms(
K.geometry.Resize(self.image_size, interpolation="nearest"), K.augmentation.RandomHorizontalFlip(p=0.5)
),
)
def per_sample_transform(self) -> Callable:
return ApplyToKeys(
[DataKeys.INPUT, DataKeys.TARGET],
KorniaParallelTransforms(K.geometry.Resize(self.image_size, interpolation="nearest")),
)
def predict_per_sample_transform(self) -> Callable:
return ApplyToKeys(DataKeys.INPUT, K.geometry.Resize(self.image_size, interpolation="nearest"))
```
into this
```python
@dataclass
class SemanticSegmentationInputTransform(InputTransform):
image_size: Tuple[int, int] = (128, 128)
mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)
std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)
def train_per_sample_transform(self) -> Callable:
return T.Compose(
[
ApplyToKeys(
[DataKeys.INPUT, DataKeys.TARGET],
KorniaParallelTransforms(
K.geometry.Resize(self.image_size, interpolation="nearest"),
)
),
ApplyToKeys(
[DataKeys.INPUT],
K.augmentation.Normalize(mean=mean, std=std)
),
]
)
def per_sample_transform(self) -> Callable:
return T.Compose(
[
ApplyToKeys(
[DataKeys.INPUT, DataKeys.TARGET],
KorniaParallelTransforms(
K.geometry.Resize(self.image_size, interpolation="nearest"),
)
),
ApplyToKeys(
[DataKeys.INPUT],
K.augmentation.Normalize(mean=mean, std=std)
),
]
)
def predict_per_sample_transform(self) -> Callable:
return ApplyToKeys(
DataKeys.INPUT,
K.geometry.Resize(self.image_size, interpolation="nearest"),
K.augmentation.Normalize(mean=mean, std=std)
)
```
### Alternatives
The alternative is to write a custom InputTransform object every time.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/image/segmentation/input_transform.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from dataclasses import dataclass
15 from typing import Any, Callable, Dict, Tuple
16
17 from flash.core.data.io.input import DataKeys
18 from flash.core.data.io.input_transform import InputTransform
19 from flash.core.data.transforms import ApplyToKeys, kornia_collate, KorniaParallelTransforms
20 from flash.core.utilities.imports import _KORNIA_AVAILABLE, _TORCHVISION_AVAILABLE
21
22 if _KORNIA_AVAILABLE:
23 import kornia as K
24
25 if _TORCHVISION_AVAILABLE:
26 from torchvision import transforms as T
27
28
29 def prepare_target(batch: Dict[str, Any]) -> Dict[str, Any]:
30 """Convert the target mask to long and remove the channel dimension."""
31 if DataKeys.TARGET in batch:
32 batch[DataKeys.TARGET] = batch[DataKeys.TARGET].long().squeeze(1)
33 return batch
34
35
36 def remove_extra_dimensions(batch: Dict[str, Any]):
37 if isinstance(batch[DataKeys.INPUT], list):
38 assert len(batch[DataKeys.INPUT]) == 1
39 batch[DataKeys.INPUT] = batch[DataKeys.INPUT][0]
40 return batch
41
42
43 @dataclass
44 class SemanticSegmentationInputTransform(InputTransform):
45
46 image_size: Tuple[int, int] = (128, 128)
47
48 def train_per_sample_transform(self) -> Callable:
49 return ApplyToKeys(
50 [DataKeys.INPUT, DataKeys.TARGET],
51 KorniaParallelTransforms(
52 K.geometry.Resize(self.image_size, interpolation="nearest"), K.augmentation.RandomHorizontalFlip(p=0.5)
53 ),
54 )
55
56 def per_sample_transform(self) -> Callable:
57 return ApplyToKeys(
58 [DataKeys.INPUT, DataKeys.TARGET],
59 KorniaParallelTransforms(K.geometry.Resize(self.image_size, interpolation="nearest")),
60 )
61
62 def predict_per_sample_transform(self) -> Callable:
63 return ApplyToKeys(DataKeys.INPUT, K.geometry.Resize(self.image_size, interpolation="nearest"))
64
65 def collate(self) -> Callable:
66 return kornia_collate
67
68 def per_batch_transform(self) -> Callable:
69 return T.Compose([prepare_target, remove_extra_dimensions])
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flash/image/segmentation/input_transform.py b/flash/image/segmentation/input_transform.py
--- a/flash/image/segmentation/input_transform.py
+++ b/flash/image/segmentation/input_transform.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
-from typing import Any, Callable, Dict, Tuple
+from typing import Any, Callable, Dict, Tuple, Union
from flash.core.data.io.input import DataKeys
from flash.core.data.io.input_transform import InputTransform
@@ -44,23 +44,43 @@
class SemanticSegmentationInputTransform(InputTransform):
image_size: Tuple[int, int] = (128, 128)
+ mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)
+ std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)
def train_per_sample_transform(self) -> Callable:
- return ApplyToKeys(
- [DataKeys.INPUT, DataKeys.TARGET],
- KorniaParallelTransforms(
- K.geometry.Resize(self.image_size, interpolation="nearest"), K.augmentation.RandomHorizontalFlip(p=0.5)
- ),
+ return T.Compose(
+ [
+ ApplyToKeys(
+ [DataKeys.INPUT, DataKeys.TARGET],
+ KorniaParallelTransforms(
+ K.geometry.Resize(self.image_size, interpolation="nearest"),
+ K.augmentation.RandomHorizontalFlip(p=0.5),
+ ),
+ ),
+ ApplyToKeys([DataKeys.INPUT], K.augmentation.Normalize(mean=self.mean, std=self.std)),
+ ]
)
def per_sample_transform(self) -> Callable:
- return ApplyToKeys(
- [DataKeys.INPUT, DataKeys.TARGET],
- KorniaParallelTransforms(K.geometry.Resize(self.image_size, interpolation="nearest")),
+ return T.Compose(
+ [
+ ApplyToKeys(
+ [DataKeys.INPUT, DataKeys.TARGET],
+ KorniaParallelTransforms(K.geometry.Resize(self.image_size, interpolation="nearest")),
+ ),
+ ApplyToKeys([DataKeys.INPUT], K.augmentation.Normalize(mean=self.mean, std=self.std)),
+ ]
)
def predict_per_sample_transform(self) -> Callable:
- return ApplyToKeys(DataKeys.INPUT, K.geometry.Resize(self.image_size, interpolation="nearest"))
+ return ApplyToKeys(
+ DataKeys.INPUT,
+ K.geometry.Resize(
+ self.image_size,
+ interpolation="nearest",
+ ),
+ K.augmentation.Normalize(mean=self.mean, std=self.std),
+ )
def collate(self) -> Callable:
return kornia_collate
| {"golden_diff": "diff --git a/flash/image/segmentation/input_transform.py b/flash/image/segmentation/input_transform.py\n--- a/flash/image/segmentation/input_transform.py\n+++ b/flash/image/segmentation/input_transform.py\n@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n from dataclasses import dataclass\n-from typing import Any, Callable, Dict, Tuple\n+from typing import Any, Callable, Dict, Tuple, Union\n \n from flash.core.data.io.input import DataKeys\n from flash.core.data.io.input_transform import InputTransform\n@@ -44,23 +44,43 @@\n class SemanticSegmentationInputTransform(InputTransform):\n \n image_size: Tuple[int, int] = (128, 128)\n+ mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)\n+ std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)\n \n def train_per_sample_transform(self) -> Callable:\n- return ApplyToKeys(\n- [DataKeys.INPUT, DataKeys.TARGET],\n- KorniaParallelTransforms(\n- K.geometry.Resize(self.image_size, interpolation=\"nearest\"), K.augmentation.RandomHorizontalFlip(p=0.5)\n- ),\n+ return T.Compose(\n+ [\n+ ApplyToKeys(\n+ [DataKeys.INPUT, DataKeys.TARGET],\n+ KorniaParallelTransforms(\n+ K.geometry.Resize(self.image_size, interpolation=\"nearest\"),\n+ K.augmentation.RandomHorizontalFlip(p=0.5),\n+ ),\n+ ),\n+ ApplyToKeys([DataKeys.INPUT], K.augmentation.Normalize(mean=self.mean, std=self.std)),\n+ ]\n )\n \n def per_sample_transform(self) -> Callable:\n- return ApplyToKeys(\n- [DataKeys.INPUT, DataKeys.TARGET],\n- KorniaParallelTransforms(K.geometry.Resize(self.image_size, interpolation=\"nearest\")),\n+ return T.Compose(\n+ [\n+ ApplyToKeys(\n+ [DataKeys.INPUT, DataKeys.TARGET],\n+ KorniaParallelTransforms(K.geometry.Resize(self.image_size, interpolation=\"nearest\")),\n+ ),\n+ ApplyToKeys([DataKeys.INPUT], K.augmentation.Normalize(mean=self.mean, std=self.std)),\n+ ]\n )\n \n def predict_per_sample_transform(self) -> Callable:\n- return ApplyToKeys(DataKeys.INPUT, K.geometry.Resize(self.image_size, interpolation=\"nearest\"))\n+ return ApplyToKeys(\n+ DataKeys.INPUT,\n+ K.geometry.Resize(\n+ self.image_size,\n+ interpolation=\"nearest\",\n+ ),\n+ K.augmentation.Normalize(mean=self.mean, std=self.std),\n+ )\n \n def collate(self) -> Callable:\n return kornia_collate\n", "issue": "Enable input normalization in SemanticSegmentationData module\n## \ud83d\ude80 Feature\r\nAdd the possibility to normalize Input images in SemanticSegmentationData module\r\n\r\n### Motivation\r\nEnable effortless normalization, as already implemented by ImageClassificationData: optionally configurable by doing: \r\n```python\r\ndm = SemanticSegmentationData.from_folders(\r\n # ...\r\n args_transforms=dict(mean=mean,std=std)\r\n)\r\n```\r\n\r\n### Pitch\r\nChange [/flash/image/segmentation/input_transform.py:43](https://github.com/Lightning-AI/lightning-flash/blob/master/flash/image/segmentation/input_transform.py#L43)\r\n\r\n```python\r\n\r\n@dataclass\r\nclass SemanticSegmentationInputTransform(InputTransform):\r\n\r\n image_size: Tuple[int, int] = (128, 128)\r\n\r\n def train_per_sample_transform(self) -> Callable:\r\n return ApplyToKeys(\r\n [DataKeys.INPUT, DataKeys.TARGET],\r\n KorniaParallelTransforms(\r\n K.geometry.Resize(self.image_size, interpolation=\"nearest\"), K.augmentation.RandomHorizontalFlip(p=0.5)\r\n ),\r\n )\r\n\r\n def per_sample_transform(self) -> Callable:\r\n return ApplyToKeys(\r\n [DataKeys.INPUT, DataKeys.TARGET],\r\n 
KorniaParallelTransforms(K.geometry.Resize(self.image_size, interpolation=\"nearest\")),\r\n )\r\n\r\n def predict_per_sample_transform(self) -> Callable:\r\n return ApplyToKeys(DataKeys.INPUT, K.geometry.Resize(self.image_size, interpolation=\"nearest\"))\r\n```\r\n\r\ninto this\r\n\r\n```python\r\n@dataclass\r\nclass SemanticSegmentationInputTransform(InputTransform):\r\n\r\n image_size: Tuple[int, int] = (128, 128)\r\n mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)\r\n std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)\r\n\r\n\r\n def train_per_sample_transform(self) -> Callable:\r\n return T.Compose(\r\n [\r\n ApplyToKeys(\r\n [DataKeys.INPUT, DataKeys.TARGET],\r\n KorniaParallelTransforms(\r\n K.geometry.Resize(self.image_size, interpolation=\"nearest\"),\r\n )\r\n ),\r\n ApplyToKeys(\r\n [DataKeys.INPUT],\r\n K.augmentation.Normalize(mean=mean, std=std)\r\n \r\n ),\r\n ]\r\n )\r\n\r\n def per_sample_transform(self) -> Callable:\r\n return T.Compose(\r\n [\r\n ApplyToKeys(\r\n [DataKeys.INPUT, DataKeys.TARGET],\r\n KorniaParallelTransforms(\r\n K.geometry.Resize(self.image_size, interpolation=\"nearest\"),\r\n )\r\n ),\r\n ApplyToKeys(\r\n [DataKeys.INPUT],\r\n K.augmentation.Normalize(mean=mean, std=std)\r\n \r\n ),\r\n ]\r\n )\r\n\r\n def predict_per_sample_transform(self) -> Callable: \r\n return ApplyToKeys(\r\n DataKeys.INPUT, \r\n K.geometry.Resize(self.image_size, interpolation=\"nearest\"), \r\n K.augmentation.Normalize(mean=mean, std=std)\r\n )\r\n\r\n```\r\n\r\n### Alternatives\r\nThe alternative is to write a custom InputTransform object every time.\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Dict, Tuple\n\nfrom flash.core.data.io.input import DataKeys\nfrom flash.core.data.io.input_transform import InputTransform\nfrom flash.core.data.transforms import ApplyToKeys, kornia_collate, KorniaParallelTransforms\nfrom flash.core.utilities.imports import _KORNIA_AVAILABLE, _TORCHVISION_AVAILABLE\n\nif _KORNIA_AVAILABLE:\n import kornia as K\n\nif _TORCHVISION_AVAILABLE:\n from torchvision import transforms as T\n\n\ndef prepare_target(batch: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Convert the target mask to long and remove the channel dimension.\"\"\"\n if DataKeys.TARGET in batch:\n batch[DataKeys.TARGET] = batch[DataKeys.TARGET].long().squeeze(1)\n return batch\n\n\ndef remove_extra_dimensions(batch: Dict[str, Any]):\n if isinstance(batch[DataKeys.INPUT], list):\n assert len(batch[DataKeys.INPUT]) == 1\n batch[DataKeys.INPUT] = batch[DataKeys.INPUT][0]\n return batch\n\n\n@dataclass\nclass SemanticSegmentationInputTransform(InputTransform):\n\n image_size: Tuple[int, int] = (128, 128)\n\n def train_per_sample_transform(self) -> Callable:\n return ApplyToKeys(\n [DataKeys.INPUT, DataKeys.TARGET],\n KorniaParallelTransforms(\n K.geometry.Resize(self.image_size, 
interpolation=\"nearest\"), K.augmentation.RandomHorizontalFlip(p=0.5)\n ),\n )\n\n def per_sample_transform(self) -> Callable:\n return ApplyToKeys(\n [DataKeys.INPUT, DataKeys.TARGET],\n KorniaParallelTransforms(K.geometry.Resize(self.image_size, interpolation=\"nearest\")),\n )\n\n def predict_per_sample_transform(self) -> Callable:\n return ApplyToKeys(DataKeys.INPUT, K.geometry.Resize(self.image_size, interpolation=\"nearest\"))\n\n def collate(self) -> Callable:\n return kornia_collate\n\n def per_batch_transform(self) -> Callable:\n return T.Compose([prepare_target, remove_extra_dimensions])\n", "path": "flash/image/segmentation/input_transform.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Dict, Tuple, Union\n\nfrom flash.core.data.io.input import DataKeys\nfrom flash.core.data.io.input_transform import InputTransform\nfrom flash.core.data.transforms import ApplyToKeys, kornia_collate, KorniaParallelTransforms\nfrom flash.core.utilities.imports import _KORNIA_AVAILABLE, _TORCHVISION_AVAILABLE\n\nif _KORNIA_AVAILABLE:\n import kornia as K\n\nif _TORCHVISION_AVAILABLE:\n from torchvision import transforms as T\n\n\ndef prepare_target(batch: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Convert the target mask to long and remove the channel dimension.\"\"\"\n if DataKeys.TARGET in batch:\n batch[DataKeys.TARGET] = batch[DataKeys.TARGET].long().squeeze(1)\n return batch\n\n\ndef remove_extra_dimensions(batch: Dict[str, Any]):\n if isinstance(batch[DataKeys.INPUT], list):\n assert len(batch[DataKeys.INPUT]) == 1\n batch[DataKeys.INPUT] = batch[DataKeys.INPUT][0]\n return batch\n\n\n@dataclass\nclass SemanticSegmentationInputTransform(InputTransform):\n\n image_size: Tuple[int, int] = (128, 128)\n mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)\n std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)\n\n def train_per_sample_transform(self) -> Callable:\n return T.Compose(\n [\n ApplyToKeys(\n [DataKeys.INPUT, DataKeys.TARGET],\n KorniaParallelTransforms(\n K.geometry.Resize(self.image_size, interpolation=\"nearest\"),\n K.augmentation.RandomHorizontalFlip(p=0.5),\n ),\n ),\n ApplyToKeys([DataKeys.INPUT], K.augmentation.Normalize(mean=self.mean, std=self.std)),\n ]\n )\n\n def per_sample_transform(self) -> Callable:\n return T.Compose(\n [\n ApplyToKeys(\n [DataKeys.INPUT, DataKeys.TARGET],\n KorniaParallelTransforms(K.geometry.Resize(self.image_size, interpolation=\"nearest\")),\n ),\n ApplyToKeys([DataKeys.INPUT], K.augmentation.Normalize(mean=self.mean, std=self.std)),\n ]\n )\n\n def predict_per_sample_transform(self) -> Callable:\n return ApplyToKeys(\n DataKeys.INPUT,\n K.geometry.Resize(\n self.image_size,\n interpolation=\"nearest\",\n ),\n K.augmentation.Normalize(mean=self.mean, std=self.std),\n )\n\n def collate(self) -> Callable:\n return kornia_collate\n\n def per_batch_transform(self) -> 
Callable:\n return T.Compose([prepare_target, remove_extra_dimensions])\n", "path": "flash/image/segmentation/input_transform.py"}]} | 1,654 | 643 |
gh_patches_debug_27268 | rasdani/github-patches | git_diff | fidals__shopelectro-1023 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update prices separately
We update price files only after a successful catalog data update. Sometimes we struggle with the data update, but we still need to update the price files; otherwise we will get penalties from aggregators.
We should make the price file update independent of the catalog data update.
We can try these approaches:
1) Update the files in a separate celery cron task
2) Update the files in the finally block of the update_catalog task
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/tasks.py`
Content:
```
1 from contextlib import contextmanager
2
3 from django.conf import settings
4 from django.core.management import call_command
5 from selenium.common.exceptions import WebDriverException
6
7 from shopelectro import selenium
8 from shopelectro.celery import app
9 from shopelectro.report import TelegramReport
10 from shopelectro.models import CategoryPage
11 from shopelectro.management.commands._update_catalog import utils
12
13
14 @contextmanager
15 def report():
16 try:
17 yield
18 except Exception as error:
19 utils.report(str(error))
20 raise error
21
22
23 @app.task
24 def generate_price_files():
25 with report():
26 call_command('price')
27 print('Generate prices complete.')
28
29
30 @app.task
31 def generate_excel_file():
32 with report():
33 call_command('excel')
34 print('Generate excel complete.')
35
36
37 @app.task
38 def collect_static():
39 with report():
40 call_command('collectstatic', '--noinput')
41
42
43 @app.task
44 def update_catalog_command():
45 with report():
46 call_command('update_catalog')
47
48
49 @app.task
50 def update_default_templates():
51 with report():
52 call_command('update_default_templates')
53
54
55 @app.task(autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)
56 def update_catalog():
57 # http://docs.celeryproject.org/en/latest/userguide/canvas.html#map-starmap
58 return [
59 update_catalog_command(),
60 update_default_templates(),
61 generate_price_files(),
62 generate_excel_file(),
63 collect_static()
64 ]
65
66
67 @app.task(
68 bind=True,
69 autoretry_for=(WebDriverException, AssertionError),
70 retry_kwargs={'max_retries': settings.CHECK_PURCHASE_RETRIES},
71 )
72 def check_purchase(self):
73 try:
74 with selenium.SiteDriver(site_url=settings.BASE_URL) as driver:
75 category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)
76 category_page.load()
77 category_page.add_to_cart()
78
79 order_page = selenium.OrderPage(driver)
80 order_page.load()
81 order_page.fill_contacts()
82 order_page.make_order()
83
84 success_page = selenium.SuccessPage(driver)
85 assert success_page.is_success()
86 except (WebDriverException, AssertionError) as err:
87 if self.request.retries + 1 > self.max_retries:
88 # report on the last attempt
89 TelegramReport().send(f'Can\'t buy a product. Got the error: {err}')
90 raise err
91
```
Path: `shopelectro/celery.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2 from datetime import timedelta
3 import os
4
5 from celery import Celery
6 from kombu import Exchange, Queue
7
8 # set the default Django settings module for the 'celery' program.
9 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')
10
11 app = Celery('shopelectro')
12
13 # Exchanges
14 default_exchange = Exchange('default', type='direct')
15 utils_exchange = Exchange('utils', type='direct')
16
17 # http://docs.celeryproject.org/en/latest/userguide/tasks.html
18 task_queues = (
19 Queue(
20 name='default',
21 exchange=default_exchange,
22 routing_key='default',
23 ),
24 Queue(
25 name='mail',
26 exchange=utils_exchange,
27 routing_key='utils.mail',
28 ),
29 Queue(
30 name='command',
31 exchange=utils_exchange,
32 routing_key='utils.command',
33 )
34 )
35
36 # http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html
37 beat_schedule = {
38 'update-catalog': {
39 'task': 'shopelectro.tasks.update_catalog',
40 'schedule': timedelta(hours=2).total_seconds(),
41 },
42 'check-purchase': {
43 'task': 'shopelectro.tasks.check_purchase',
44 'schedule': timedelta(days=1).total_seconds(),
45 },
46 }
47
48 # http://docs.celeryproject.org/en/master/userguide/routing.html
49 task_routes = {
50 'shopelectro.tasks.update_catalog': {
51 'queue': 'command',
52 'routing_key': 'utils.command',
53 'priority': 30,
54 },
55 'shopelectro.tasks.check_purchase': {
56 'queue': 'command',
57 'routing_key': 'utils.command',
58 'priority': 20,
59 },
60 'ecommerce.tasks.send_mail': {
61 'queue': 'mail',
62 'routing_key': 'utils.mail',
63 'priority': 50,
64 },
65 }
66
67 # Using a string here means the worker don't have to serialize
68 # the configuration object to child processes.
69 # - namespace='CELERY' means all celery-related configuration keys
70 # should have a `CELERY_` prefix.
71 app.config_from_object('django.conf:settings', namespace='CELERY')
72
73 # http://docs.celeryproject.org/en/latest/userguide/configuration.html
74
75 BROCKER_URL = (
76 f'amqp://{os.environ["RABBITMQ_DEFAULT_USER"]}:{os.environ["RABBITMQ_DEFAULT_PASS"]}'
77 f'@{os.environ["RABBITMQ_URL"]}:{os.environ["RABBITMQ_PORT"]}/'
78 )
79 app.conf.update(
80 broker_url=BROCKER_URL,
81 broker_heartbeat=30,
82 task_acks_late=True,
83 task_default_queue='default',
84 task_default_exchange='default',
85 task_default_routing_key='default',
86 worker_pool_restarts=True,
87 task_routes=task_routes,
88 task_queues=task_queues,
89 beat_schedule=beat_schedule,
90 worker_max_memory_per_child=200000, # after 250MB will restart
91 )
92
93 # Load task modules from all registered Django app configs.
94 app.autodiscover_tasks()
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/celery.py b/shopelectro/celery.py
--- a/shopelectro/celery.py
+++ b/shopelectro/celery.py
@@ -39,6 +39,10 @@
'task': 'shopelectro.tasks.update_catalog',
'schedule': timedelta(hours=2).total_seconds(),
},
+ 'update-prices': {
+ 'task': 'shopelectro.tasks.update_prices',
+ 'schedule': timedelta(hours=1).total_seconds(),
+ },
'check-purchase': {
'task': 'shopelectro.tasks.check_purchase',
'schedule': timedelta(days=1).total_seconds(),
@@ -52,6 +56,11 @@
'routing_key': 'utils.command',
'priority': 30,
},
+ 'shopelectro.tasks.update_prices': {
+ 'queue': 'command',
+ 'routing_key': 'utils.command',
+ 'priority': 50,
+ },
'shopelectro.tasks.check_purchase': {
'queue': 'command',
'routing_key': 'utils.command',
diff --git a/shopelectro/tasks.py b/shopelectro/tasks.py
--- a/shopelectro/tasks.py
+++ b/shopelectro/tasks.py
@@ -58,9 +58,16 @@
return [
update_catalog_command(),
update_default_templates(),
+ collect_static(),
+ ]
+
+
[email protected](autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)
+def update_prices():
+ return [
generate_price_files(),
generate_excel_file(),
- collect_static()
+ collect_static(),
]
| {"golden_diff": "diff --git a/shopelectro/celery.py b/shopelectro/celery.py\n--- a/shopelectro/celery.py\n+++ b/shopelectro/celery.py\n@@ -39,6 +39,10 @@\n 'task': 'shopelectro.tasks.update_catalog',\n 'schedule': timedelta(hours=2).total_seconds(),\n },\n+ 'update-prices': {\n+ 'task': 'shopelectro.tasks.update_prices',\n+ 'schedule': timedelta(hours=1).total_seconds(),\n+ },\n 'check-purchase': {\n 'task': 'shopelectro.tasks.check_purchase',\n 'schedule': timedelta(days=1).total_seconds(),\n@@ -52,6 +56,11 @@\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n+ 'shopelectro.tasks.update_prices': {\n+ 'queue': 'command',\n+ 'routing_key': 'utils.command',\n+ 'priority': 50,\n+ },\n 'shopelectro.tasks.check_purchase': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\ndiff --git a/shopelectro/tasks.py b/shopelectro/tasks.py\n--- a/shopelectro/tasks.py\n+++ b/shopelectro/tasks.py\n@@ -58,9 +58,16 @@\n return [\n update_catalog_command(),\n update_default_templates(),\n+ collect_static(),\n+ ]\n+\n+\[email protected](autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)\n+def update_prices():\n+ return [\n generate_price_files(),\n generate_excel_file(),\n- collect_static()\n+ collect_static(),\n ]\n", "issue": "Update prices separately\nWe update price files only after successful catalog data update. Sometimes we have struggle with data update, but we still need to update price files, otherwise we will get penalties from aggregators.\r\n\r\nWe should make the price files update independent of the catalog data update.\r\nWe can try these approaches:\r\n1) Update files in separate celery cron task\r\n2) Update files in finally block of update_catalog task \n", "before_files": [{"content": "from contextlib import contextmanager\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom selenium.common.exceptions import WebDriverException\n\nfrom shopelectro import selenium\nfrom shopelectro.celery import app\nfrom shopelectro.report import TelegramReport\nfrom shopelectro.models import CategoryPage\nfrom shopelectro.management.commands._update_catalog import utils\n\n\n@contextmanager\ndef report():\n try:\n yield\n except Exception as error:\n utils.report(str(error))\n raise error\n\n\[email protected]\ndef generate_price_files():\n with report():\n call_command('price')\n print('Generate prices complete.')\n\n\[email protected]\ndef generate_excel_file():\n with report():\n call_command('excel')\n print('Generate excel complete.')\n\n\[email protected]\ndef collect_static():\n with report():\n call_command('collectstatic', '--noinput')\n\n\[email protected]\ndef update_catalog_command():\n with report():\n call_command('update_catalog')\n\n\[email protected]\ndef update_default_templates():\n with report():\n call_command('update_default_templates')\n\n\[email protected](autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)\ndef update_catalog():\n # http://docs.celeryproject.org/en/latest/userguide/canvas.html#map-starmap\n return [\n update_catalog_command(),\n update_default_templates(),\n generate_price_files(),\n generate_excel_file(),\n collect_static()\n ]\n\n\[email protected](\n bind=True,\n autoretry_for=(WebDriverException, AssertionError),\n retry_kwargs={'max_retries': settings.CHECK_PURCHASE_RETRIES},\n)\ndef check_purchase(self):\n try:\n with selenium.SiteDriver(site_url=settings.BASE_URL) as driver:\n category_page = 
selenium.CategoryPage(driver, CategoryPage.objects.first().slug)\n category_page.load()\n category_page.add_to_cart()\n\n order_page = selenium.OrderPage(driver)\n order_page.load()\n order_page.fill_contacts()\n order_page.make_order()\n\n success_page = selenium.SuccessPage(driver)\n assert success_page.is_success()\n except (WebDriverException, AssertionError) as err:\n if self.request.retries + 1 > self.max_retries:\n # report on the last attempt\n TelegramReport().send(f'Can\\'t buy a product. Got the error: {err}')\n raise err\n", "path": "shopelectro/tasks.py"}, {"content": "from __future__ import absolute_import, unicode_literals\nfrom datetime import timedelta\nimport os\n\nfrom celery import Celery\nfrom kombu import Exchange, Queue\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')\n\napp = Celery('shopelectro')\n\n# Exchanges\ndefault_exchange = Exchange('default', type='direct')\nutils_exchange = Exchange('utils', type='direct')\n\n# http://docs.celeryproject.org/en/latest/userguide/tasks.html\ntask_queues = (\n Queue(\n name='default',\n exchange=default_exchange,\n routing_key='default',\n ),\n Queue(\n name='mail',\n exchange=utils_exchange,\n routing_key='utils.mail',\n ),\n Queue(\n name='command',\n exchange=utils_exchange,\n routing_key='utils.command',\n )\n)\n\n# http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html\nbeat_schedule = {\n 'update-catalog': {\n 'task': 'shopelectro.tasks.update_catalog',\n 'schedule': timedelta(hours=2).total_seconds(),\n },\n 'check-purchase': {\n 'task': 'shopelectro.tasks.check_purchase',\n 'schedule': timedelta(days=1).total_seconds(),\n },\n}\n\n# http://docs.celeryproject.org/en/master/userguide/routing.html\ntask_routes = {\n 'shopelectro.tasks.update_catalog': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n 'shopelectro.tasks.check_purchase': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 20,\n },\n 'ecommerce.tasks.send_mail': {\n 'queue': 'mail',\n 'routing_key': 'utils.mail',\n 'priority': 50,\n },\n}\n\n# Using a string here means the worker don't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html\n\nBROCKER_URL = (\n f'amqp://{os.environ[\"RABBITMQ_DEFAULT_USER\"]}:{os.environ[\"RABBITMQ_DEFAULT_PASS\"]}'\n f'@{os.environ[\"RABBITMQ_URL\"]}:{os.environ[\"RABBITMQ_PORT\"]}/'\n)\napp.conf.update(\n broker_url=BROCKER_URL,\n broker_heartbeat=30,\n task_acks_late=True,\n task_default_queue='default',\n task_default_exchange='default',\n task_default_routing_key='default',\n worker_pool_restarts=True,\n task_routes=task_routes,\n task_queues=task_queues,\n beat_schedule=beat_schedule,\n worker_max_memory_per_child=200000, # after 250MB will restart\n)\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n", "path": "shopelectro/celery.py"}], "after_files": [{"content": "from contextlib import contextmanager\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom selenium.common.exceptions import WebDriverException\n\nfrom shopelectro import selenium\nfrom shopelectro.celery import app\nfrom shopelectro.report import 
TelegramReport\nfrom shopelectro.models import CategoryPage\nfrom shopelectro.management.commands._update_catalog import utils\n\n\n@contextmanager\ndef report():\n try:\n yield\n except Exception as error:\n utils.report(str(error))\n raise error\n\n\[email protected]\ndef generate_price_files():\n with report():\n call_command('price')\n print('Generate prices complete.')\n\n\[email protected]\ndef generate_excel_file():\n with report():\n call_command('excel')\n print('Generate excel complete.')\n\n\[email protected]\ndef collect_static():\n with report():\n call_command('collectstatic', '--noinput')\n\n\[email protected]\ndef update_catalog_command():\n with report():\n call_command('update_catalog')\n\n\[email protected]\ndef update_default_templates():\n with report():\n call_command('update_default_templates')\n\n\[email protected](autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)\ndef update_catalog():\n # http://docs.celeryproject.org/en/latest/userguide/canvas.html#map-starmap\n return [\n update_catalog_command(),\n update_default_templates(),\n collect_static(),\n ]\n\n\[email protected](autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)\ndef update_prices():\n return [\n generate_price_files(),\n generate_excel_file(),\n collect_static(),\n ]\n\n\[email protected](\n bind=True,\n autoretry_for=(WebDriverException, AssertionError),\n retry_kwargs={'max_retries': settings.CHECK_PURCHASE_RETRIES},\n)\ndef check_purchase(self):\n try:\n with selenium.SiteDriver(site_url=settings.BASE_URL) as driver:\n category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)\n category_page.load()\n category_page.add_to_cart()\n\n order_page = selenium.OrderPage(driver)\n order_page.load()\n order_page.fill_contacts()\n order_page.make_order()\n\n success_page = selenium.SuccessPage(driver)\n assert success_page.is_success()\n except (WebDriverException, AssertionError) as err:\n if self.request.retries + 1 > self.max_retries:\n # report on the last attempt\n TelegramReport().send(f'Can\\'t buy a product. 
Got the error: {err}')\n raise err\n", "path": "shopelectro/tasks.py"}, {"content": "from __future__ import absolute_import, unicode_literals\nfrom datetime import timedelta\nimport os\n\nfrom celery import Celery\nfrom kombu import Exchange, Queue\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')\n\napp = Celery('shopelectro')\n\n# Exchanges\ndefault_exchange = Exchange('default', type='direct')\nutils_exchange = Exchange('utils', type='direct')\n\n# http://docs.celeryproject.org/en/latest/userguide/tasks.html\ntask_queues = (\n Queue(\n name='default',\n exchange=default_exchange,\n routing_key='default',\n ),\n Queue(\n name='mail',\n exchange=utils_exchange,\n routing_key='utils.mail',\n ),\n Queue(\n name='command',\n exchange=utils_exchange,\n routing_key='utils.command',\n )\n)\n\n# http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html\nbeat_schedule = {\n 'update-catalog': {\n 'task': 'shopelectro.tasks.update_catalog',\n 'schedule': timedelta(hours=2).total_seconds(),\n },\n 'update-prices': {\n 'task': 'shopelectro.tasks.update_prices',\n 'schedule': timedelta(hours=1).total_seconds(),\n },\n 'check-purchase': {\n 'task': 'shopelectro.tasks.check_purchase',\n 'schedule': timedelta(days=1).total_seconds(),\n },\n}\n\n# http://docs.celeryproject.org/en/master/userguide/routing.html\ntask_routes = {\n 'shopelectro.tasks.update_catalog': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n 'shopelectro.tasks.update_prices': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 50,\n },\n 'shopelectro.tasks.check_purchase': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 20,\n },\n 'ecommerce.tasks.send_mail': {\n 'queue': 'mail',\n 'routing_key': 'utils.mail',\n 'priority': 50,\n },\n}\n\n# Using a string here means the worker don't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html\n\nBROCKER_URL = (\n f'amqp://{os.environ[\"RABBITMQ_DEFAULT_USER\"]}:{os.environ[\"RABBITMQ_DEFAULT_PASS\"]}'\n f'@{os.environ[\"RABBITMQ_URL\"]}:{os.environ[\"RABBITMQ_PORT\"]}/'\n)\napp.conf.update(\n broker_url=BROCKER_URL,\n broker_heartbeat=30,\n task_acks_late=True,\n task_default_queue='default',\n task_default_exchange='default',\n task_default_routing_key='default',\n worker_pool_restarts=True,\n task_routes=task_routes,\n task_queues=task_queues,\n beat_schedule=beat_schedule,\n worker_max_memory_per_child=200000, # after 250MB will restart\n)\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n", "path": "shopelectro/celery.py"}]} | 1,930 | 400 |
gh_patches_debug_2744 | rasdani/github-patches | git_diff | pulp__pulpcore-3381 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Export is not locking on the exported repositories
SSIA
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/viewsets/exporter.py`
Content:
```
1 from django_filters.rest_framework import filters
2
3 from drf_spectacular.utils import extend_schema
4 from rest_framework import mixins
5
6 from pulpcore.app.models import (
7 Export,
8 Exporter,
9 FilesystemExport,
10 FilesystemExporter,
11 Publication,
12 PulpExport,
13 PulpExporter,
14 RepositoryVersion,
15 )
16
17 from pulpcore.app.serializers import (
18 AsyncOperationResponseSerializer,
19 ExportSerializer,
20 ExporterSerializer,
21 FilesystemExporterSerializer,
22 FilesystemExportSerializer,
23 PulpExporterSerializer,
24 PulpExportSerializer,
25 )
26
27 from pulpcore.app.tasks.export import fs_publication_export, fs_repo_version_export, pulp_export
28
29 from pulpcore.app.viewsets import (
30 AsyncRemoveMixin,
31 AsyncUpdateMixin,
32 BaseFilterSet,
33 NamedModelViewSet,
34 )
35 from pulpcore.app.viewsets.base import NAME_FILTER_OPTIONS
36 from pulpcore.plugin.tasking import dispatch
37 from pulpcore.app.response import OperationPostponedResponse
38
39
40 class ExporterFilter(BaseFilterSet):
41 """
42 Plugin file system exporter filter should:
43 - inherit from this class
44 - add any specific filters if needed
45 - define a `Meta` class which should:
46 - specify a plugin remote model for which filter is defined
47 - extend `fields` with specific ones
48 """
49
50 name = filters.CharFilter()
51
52 class Meta:
53 model = Exporter
54 fields = {
55 "name": NAME_FILTER_OPTIONS,
56 }
57
58
59 class ExporterViewSet(
60 NamedModelViewSet,
61 mixins.CreateModelMixin,
62 AsyncUpdateMixin,
63 mixins.RetrieveModelMixin,
64 mixins.ListModelMixin,
65 AsyncRemoveMixin,
66 ):
67 """
68 ViewSet for viewing exporters.
69 """
70
71 queryset = Exporter.objects.all()
72 serializer_class = ExporterSerializer
73 endpoint_name = "exporters"
74 router_lookup = "exporter"
75 filterset_class = ExporterFilter
76
77
78 class PulpExporterViewSet(ExporterViewSet):
79 """
80 ViewSet for viewing PulpExporters.
81 """
82
83 endpoint_name = "pulp"
84 serializer_class = PulpExporterSerializer
85 queryset = PulpExporter.objects.all()
86
87
88 class FilesystemExporterViewSet(ExporterViewSet):
89 """
90 Endpoint for managing FilesystemExporters. FilesystemExporters are provided as a tech preview.
91 """
92
93 endpoint_name = "filesystem"
94 serializer_class = FilesystemExporterSerializer
95 queryset = FilesystemExporter.objects.all()
96
97
98 class ExportViewSet(
99 NamedModelViewSet,
100 mixins.CreateModelMixin,
101 mixins.RetrieveModelMixin,
102 mixins.ListModelMixin,
103 mixins.DestroyModelMixin,
104 ):
105 """
106 ViewSet for viewing exports from an Exporter.
107 """
108
109 endpoint_name = "exports"
110 nest_prefix = "exporters"
111 router_lookup = "export"
112 lookup_field = "pk"
113 parent_lookup_kwargs = {"exporter_pk": "exporter__pk"}
114 serializer_class = ExportSerializer
115 queryset = Export.objects.all()
116 parent_viewset = ExporterViewSet
117
118
119 class PulpExportViewSet(ExportViewSet):
120 """
121 ViewSet for viewing exports from a PulpExporter.
122 """
123
124 parent_viewset = PulpExporterViewSet
125 serializer_class = PulpExportSerializer
126 queryset = PulpExport.objects.all()
127
128 @extend_schema(
129 request=PulpExportSerializer,
130 description="Trigger an asynchronous task to export a set of repositories",
131 responses={202: AsyncOperationResponseSerializer},
132 )
133 def create(self, request, exporter_pk):
134 """
135 Generates a Task to export the set of repositories assigned to a specific PulpExporter.
136 """
137 # Validate Exporter
138 exporter = PulpExporter.objects.get(pk=exporter_pk).cast()
139 ExporterSerializer.validate_path(exporter.path, check_is_dir=True)
140
141 # Validate Export
142 serializer = PulpExportSerializer(data=request.data, context={"exporter": exporter})
143 serializer.is_valid(raise_exception=True)
144
145 # Invoke the export
146 task = dispatch(
147 pulp_export,
148 exclusive_resources=[exporter],
149 kwargs={"exporter_pk": str(exporter.pk), "params": request.data},
150 )
151
152 return OperationPostponedResponse(task, request)
153
154
155 class FilesystemExportViewSet(ExportViewSet):
156 """
157 Endpoint for managing FilesystemExports. This endpoint is provided as a tech preview.
158 """
159
160 parent_viewset = FilesystemExporterViewSet
161 serializer_class = FilesystemExportSerializer
162 queryset = FilesystemExport.objects.all()
163
164 @extend_schema(
165 request=FilesystemExportSerializer,
166 description="Trigger an asynchronous task to export files to the filesystem",
167 responses={202: AsyncOperationResponseSerializer},
168 )
169 def create(self, request, exporter_pk):
170 """
171 Generates a Task to export files to the filesystem.
172 """
173 # Validate Exporter
174 exporter = FilesystemExporter.objects.get(pk=exporter_pk).cast()
175 ExporterSerializer.validate_path(exporter.path, check_is_dir=True)
176
177 # Validate Export
178 serializer = FilesystemExportSerializer(data=request.data, context={"exporter": exporter})
179 serializer.is_valid(raise_exception=True)
180
181 if request.data.get("publication"):
182 publication = self.get_resource(request.data["publication"], Publication)
183
184 task = dispatch(
185 fs_publication_export,
186 exclusive_resources=[exporter],
187 kwargs={"exporter_pk": exporter.pk, "publication_pk": publication.pk},
188 )
189 else:
190 repo_version = self.get_resource(request.data["repository_version"], RepositoryVersion)
191
192 task = dispatch(
193 fs_repo_version_export,
194 exclusive_resources=[exporter],
195 kwargs={"exporter_pk": str(exporter.pk), "repo_version_pk": repo_version.pk},
196 )
197
198 return OperationPostponedResponse(task, request)
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/viewsets/exporter.py b/pulpcore/app/viewsets/exporter.py
--- a/pulpcore/app/viewsets/exporter.py
+++ b/pulpcore/app/viewsets/exporter.py
@@ -146,6 +146,7 @@
task = dispatch(
pulp_export,
exclusive_resources=[exporter],
+ shared_resources=exporter.repositories.all(),
kwargs={"exporter_pk": str(exporter.pk), "params": request.data},
)
| {"golden_diff": "diff --git a/pulpcore/app/viewsets/exporter.py b/pulpcore/app/viewsets/exporter.py\n--- a/pulpcore/app/viewsets/exporter.py\n+++ b/pulpcore/app/viewsets/exporter.py\n@@ -146,6 +146,7 @@\n task = dispatch(\n pulp_export,\n exclusive_resources=[exporter],\n+ shared_resources=exporter.repositories.all(),\n kwargs={\"exporter_pk\": str(exporter.pk), \"params\": request.data},\n )\n", "issue": "Export is not locking on the exported repositories\nSSIA\n", "before_files": [{"content": "from django_filters.rest_framework import filters\n\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\n\nfrom pulpcore.app.models import (\n Export,\n Exporter,\n FilesystemExport,\n FilesystemExporter,\n Publication,\n PulpExport,\n PulpExporter,\n RepositoryVersion,\n)\n\nfrom pulpcore.app.serializers import (\n AsyncOperationResponseSerializer,\n ExportSerializer,\n ExporterSerializer,\n FilesystemExporterSerializer,\n FilesystemExportSerializer,\n PulpExporterSerializer,\n PulpExportSerializer,\n)\n\nfrom pulpcore.app.tasks.export import fs_publication_export, fs_repo_version_export, pulp_export\n\nfrom pulpcore.app.viewsets import (\n AsyncRemoveMixin,\n AsyncUpdateMixin,\n BaseFilterSet,\n NamedModelViewSet,\n)\nfrom pulpcore.app.viewsets.base import NAME_FILTER_OPTIONS\nfrom pulpcore.plugin.tasking import dispatch\nfrom pulpcore.app.response import OperationPostponedResponse\n\n\nclass ExporterFilter(BaseFilterSet):\n \"\"\"\n Plugin file system exporter filter should:\n - inherit from this class\n - add any specific filters if needed\n - define a `Meta` class which should:\n - specify a plugin remote model for which filter is defined\n - extend `fields` with specific ones\n \"\"\"\n\n name = filters.CharFilter()\n\n class Meta:\n model = Exporter\n fields = {\n \"name\": NAME_FILTER_OPTIONS,\n }\n\n\nclass ExporterViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n AsyncUpdateMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n AsyncRemoveMixin,\n):\n \"\"\"\n ViewSet for viewing exporters.\n \"\"\"\n\n queryset = Exporter.objects.all()\n serializer_class = ExporterSerializer\n endpoint_name = \"exporters\"\n router_lookup = \"exporter\"\n filterset_class = ExporterFilter\n\n\nclass PulpExporterViewSet(ExporterViewSet):\n \"\"\"\n ViewSet for viewing PulpExporters.\n \"\"\"\n\n endpoint_name = \"pulp\"\n serializer_class = PulpExporterSerializer\n queryset = PulpExporter.objects.all()\n\n\nclass FilesystemExporterViewSet(ExporterViewSet):\n \"\"\"\n Endpoint for managing FilesystemExporters. 
FilesystemExporters are provided as a tech preview.\n \"\"\"\n\n endpoint_name = \"filesystem\"\n serializer_class = FilesystemExporterSerializer\n queryset = FilesystemExporter.objects.all()\n\n\nclass ExportViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n):\n \"\"\"\n ViewSet for viewing exports from an Exporter.\n \"\"\"\n\n endpoint_name = \"exports\"\n nest_prefix = \"exporters\"\n router_lookup = \"export\"\n lookup_field = \"pk\"\n parent_lookup_kwargs = {\"exporter_pk\": \"exporter__pk\"}\n serializer_class = ExportSerializer\n queryset = Export.objects.all()\n parent_viewset = ExporterViewSet\n\n\nclass PulpExportViewSet(ExportViewSet):\n \"\"\"\n ViewSet for viewing exports from a PulpExporter.\n \"\"\"\n\n parent_viewset = PulpExporterViewSet\n serializer_class = PulpExportSerializer\n queryset = PulpExport.objects.all()\n\n @extend_schema(\n request=PulpExportSerializer,\n description=\"Trigger an asynchronous task to export a set of repositories\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request, exporter_pk):\n \"\"\"\n Generates a Task to export the set of repositories assigned to a specific PulpExporter.\n \"\"\"\n # Validate Exporter\n exporter = PulpExporter.objects.get(pk=exporter_pk).cast()\n ExporterSerializer.validate_path(exporter.path, check_is_dir=True)\n\n # Validate Export\n serializer = PulpExportSerializer(data=request.data, context={\"exporter\": exporter})\n serializer.is_valid(raise_exception=True)\n\n # Invoke the export\n task = dispatch(\n pulp_export,\n exclusive_resources=[exporter],\n kwargs={\"exporter_pk\": str(exporter.pk), \"params\": request.data},\n )\n\n return OperationPostponedResponse(task, request)\n\n\nclass FilesystemExportViewSet(ExportViewSet):\n \"\"\"\n Endpoint for managing FilesystemExports. 
This endpoint is provided as a tech preview.\n \"\"\"\n\n parent_viewset = FilesystemExporterViewSet\n serializer_class = FilesystemExportSerializer\n queryset = FilesystemExport.objects.all()\n\n @extend_schema(\n request=FilesystemExportSerializer,\n description=\"Trigger an asynchronous task to export files to the filesystem\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request, exporter_pk):\n \"\"\"\n Generates a Task to export files to the filesystem.\n \"\"\"\n # Validate Exporter\n exporter = FilesystemExporter.objects.get(pk=exporter_pk).cast()\n ExporterSerializer.validate_path(exporter.path, check_is_dir=True)\n\n # Validate Export\n serializer = FilesystemExportSerializer(data=request.data, context={\"exporter\": exporter})\n serializer.is_valid(raise_exception=True)\n\n if request.data.get(\"publication\"):\n publication = self.get_resource(request.data[\"publication\"], Publication)\n\n task = dispatch(\n fs_publication_export,\n exclusive_resources=[exporter],\n kwargs={\"exporter_pk\": exporter.pk, \"publication_pk\": publication.pk},\n )\n else:\n repo_version = self.get_resource(request.data[\"repository_version\"], RepositoryVersion)\n\n task = dispatch(\n fs_repo_version_export,\n exclusive_resources=[exporter],\n kwargs={\"exporter_pk\": str(exporter.pk), \"repo_version_pk\": repo_version.pk},\n )\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/viewsets/exporter.py"}], "after_files": [{"content": "from django_filters.rest_framework import filters\n\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\n\nfrom pulpcore.app.models import (\n Export,\n Exporter,\n FilesystemExport,\n FilesystemExporter,\n Publication,\n PulpExport,\n PulpExporter,\n RepositoryVersion,\n)\n\nfrom pulpcore.app.serializers import (\n AsyncOperationResponseSerializer,\n ExportSerializer,\n ExporterSerializer,\n FilesystemExporterSerializer,\n FilesystemExportSerializer,\n PulpExporterSerializer,\n PulpExportSerializer,\n)\n\nfrom pulpcore.app.tasks.export import fs_publication_export, fs_repo_version_export, pulp_export\n\nfrom pulpcore.app.viewsets import (\n AsyncRemoveMixin,\n AsyncUpdateMixin,\n BaseFilterSet,\n NamedModelViewSet,\n)\nfrom pulpcore.app.viewsets.base import NAME_FILTER_OPTIONS\nfrom pulpcore.plugin.tasking import dispatch\nfrom pulpcore.app.response import OperationPostponedResponse\n\n\nclass ExporterFilter(BaseFilterSet):\n \"\"\"\n Plugin file system exporter filter should:\n - inherit from this class\n - add any specific filters if needed\n - define a `Meta` class which should:\n - specify a plugin remote model for which filter is defined\n - extend `fields` with specific ones\n \"\"\"\n\n name = filters.CharFilter()\n\n class Meta:\n model = Exporter\n fields = {\n \"name\": NAME_FILTER_OPTIONS,\n }\n\n\nclass ExporterViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n AsyncUpdateMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n AsyncRemoveMixin,\n):\n \"\"\"\n ViewSet for viewing exporters.\n \"\"\"\n\n queryset = Exporter.objects.all()\n serializer_class = ExporterSerializer\n endpoint_name = \"exporters\"\n router_lookup = \"exporter\"\n filterset_class = ExporterFilter\n\n\nclass PulpExporterViewSet(ExporterViewSet):\n \"\"\"\n ViewSet for viewing PulpExporters.\n \"\"\"\n\n endpoint_name = \"pulp\"\n serializer_class = PulpExporterSerializer\n queryset = PulpExporter.objects.all()\n\n\nclass FilesystemExporterViewSet(ExporterViewSet):\n \"\"\"\n 
Endpoint for managing FilesystemExporters. FilesystemExporters are provided as a tech preview.\n \"\"\"\n\n endpoint_name = \"filesystem\"\n serializer_class = FilesystemExporterSerializer\n queryset = FilesystemExporter.objects.all()\n\n\nclass ExportViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n):\n \"\"\"\n ViewSet for viewing exports from an Exporter.\n \"\"\"\n\n endpoint_name = \"exports\"\n nest_prefix = \"exporters\"\n router_lookup = \"export\"\n lookup_field = \"pk\"\n parent_lookup_kwargs = {\"exporter_pk\": \"exporter__pk\"}\n serializer_class = ExportSerializer\n queryset = Export.objects.all()\n parent_viewset = ExporterViewSet\n\n\nclass PulpExportViewSet(ExportViewSet):\n \"\"\"\n ViewSet for viewing exports from a PulpExporter.\n \"\"\"\n\n parent_viewset = PulpExporterViewSet\n serializer_class = PulpExportSerializer\n queryset = PulpExport.objects.all()\n\n @extend_schema(\n request=PulpExportSerializer,\n description=\"Trigger an asynchronous task to export a set of repositories\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request, exporter_pk):\n \"\"\"\n Generates a Task to export the set of repositories assigned to a specific PulpExporter.\n \"\"\"\n # Validate Exporter\n exporter = PulpExporter.objects.get(pk=exporter_pk).cast()\n ExporterSerializer.validate_path(exporter.path, check_is_dir=True)\n\n # Validate Export\n serializer = PulpExportSerializer(data=request.data, context={\"exporter\": exporter})\n serializer.is_valid(raise_exception=True)\n\n # Invoke the export\n task = dispatch(\n pulp_export,\n exclusive_resources=[exporter],\n shared_resources=exporter.repositories.all(),\n kwargs={\"exporter_pk\": str(exporter.pk), \"params\": request.data},\n )\n\n return OperationPostponedResponse(task, request)\n\n\nclass FilesystemExportViewSet(ExportViewSet):\n \"\"\"\n Endpoint for managing FilesystemExports. This endpoint is provided as a tech preview.\n \"\"\"\n\n parent_viewset = FilesystemExporterViewSet\n serializer_class = FilesystemExportSerializer\n queryset = FilesystemExport.objects.all()\n\n @extend_schema(\n request=FilesystemExportSerializer,\n description=\"Trigger an asynchronous task to export files to the filesystem\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request, exporter_pk):\n \"\"\"\n Generates a Task to export files to the filesystem.\n \"\"\"\n # Validate Exporter\n exporter = FilesystemExporter.objects.get(pk=exporter_pk).cast()\n ExporterSerializer.validate_path(exporter.path, check_is_dir=True)\n\n # Validate Export\n serializer = FilesystemExportSerializer(data=request.data, context={\"exporter\": exporter})\n serializer.is_valid(raise_exception=True)\n\n if request.data.get(\"publication\"):\n publication = self.get_resource(request.data[\"publication\"], Publication)\n\n task = dispatch(\n fs_publication_export,\n exclusive_resources=[exporter],\n kwargs={\"exporter_pk\": exporter.pk, \"publication_pk\": publication.pk},\n )\n else:\n repo_version = self.get_resource(request.data[\"repository_version\"], RepositoryVersion)\n\n task = dispatch(\n fs_repo_version_export,\n exclusive_resources=[exporter],\n kwargs={\"exporter_pk\": str(exporter.pk), \"repo_version_pk\": repo_version.pk},\n )\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/viewsets/exporter.py"}]} | 1,989 | 109 |
gh_patches_debug_2276 | rasdani/github-patches | git_diff | cloudtools__troposphere-1740 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SageMaker Model ContainerDefinition object does not support attribute Mode
Setting a `Mode` attribute within the ContainerDefinition for both the `PrimaryContainer` and `Containers` attributes when creating a Model resource keeps throwing the error `AttributeError: ContainerDefinition object does not support attribute Mode`.
In the latest CloudFormation docs https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-model-containerdefinition.html the `Mode` attribute is supported.
Without this support, creates/updates of models with multiple containers cannot be configured.
Would you prefer I open a PR, or can I wait if it won't take much?
Thanks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/sagemaker.py`
Content:
```
1 # Copyright (c) 2012-2018, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty, Tags
7 from .validators import integer
8
9
10 class GitConfig(AWSProperty):
11 props = {
12 'Branch': (basestring, False),
13 'RepositoryUrl': (basestring, True),
14 'SecretArn': (basestring, False),
15 }
16
17
18 class CodeRepository(AWSObject):
19 resource_type = "AWS::SageMaker::CodeRepository"
20
21 props = {
22 'CodeRepositoryName': (basestring, False),
23 'GitConfig': (GitConfig, True)
24 }
25
26
27 class Endpoint(AWSObject):
28 resource_type = "AWS::SageMaker::Endpoint"
29
30 props = {
31 'EndpointName': (basestring, False),
32 'EndpointConfigName': (basestring, True),
33 'Tags': (Tags, True)
34 }
35
36
37 class ProductionVariant(AWSProperty):
38 props = {
39 'ModelName': (basestring, True),
40 'VariantName': (basestring, True),
41 'InitialInstanceCount': (integer, True),
42 'InstanceType': (basestring, True),
43 'InitialVariantWeight': (float, True)
44 }
45
46
47 class EndpointConfig(AWSObject):
48 resource_type = "AWS::SageMaker::EndpointConfig"
49
50 props = {
51 'EndpointConfigName': (basestring, False),
52 'ProductionVariants': ([ProductionVariant], True),
53 'KmsKeyId': (basestring, False),
54 'Tags': (Tags, True)
55 }
56
57
58 class ContainerDefinition(AWSProperty):
59 props = {
60 'ContainerHostname': (basestring, False),
61 'Environment': (dict, False),
62 'ModelDataUrl': (basestring, False),
63 'Image': (basestring, True)
64 }
65
66
67 class VpcConfig(AWSProperty):
68 props = {
69 'Subnets': ([basestring], True),
70 'SecurityGroupIds': ([basestring], True)
71 }
72
73
74 class Model(AWSObject):
75 resource_type = "AWS::SageMaker::Model"
76
77 props = {
78 'Containers': ([ContainerDefinition], False),
79 'ExecutionRoleArn': (basestring, True),
80 'ModelName': (basestring, False),
81 'PrimaryContainer': (ContainerDefinition, False),
82 'Tags': (Tags, False),
83 'VpcConfig': (VpcConfig, False),
84 }
85
86
87 class NotebookInstanceLifecycleHook(AWSProperty):
88 props = {
89 'Content': (basestring, False)
90 }
91
92
93 class NotebookInstanceLifecycleConfig(AWSObject):
94 resource_type = "AWS::SageMaker::NotebookInstanceLifecycleConfig"
95
96 props = {
97 'NotebookInstanceLifecycleConfigName': (basestring, False),
98 'OnCreate': ([NotebookInstanceLifecycleHook], False),
99 'OnStart': ([NotebookInstanceLifecycleHook], False)
100 }
101
102
103 class NotebookInstance(AWSObject):
104 resource_type = "AWS::SageMaker::NotebookInstance"
105
106 props = {
107 'AcceleratorTypes': ([basestring], False),
108 'AdditionalCodeRepositories': ([basestring], False),
109 'DefaultCodeRepository': (basestring, False),
110 'DirectInternetAccess': (basestring, False),
111 'InstanceType': (basestring, True),
112 'KmsKeyId': (basestring, False),
113 'LifecycleConfigName': (basestring, False),
114 'NotebookInstanceName': (basestring, False),
115 'RoleArn': (basestring, True),
116 'RootAccess': (basestring, False),
117 'SecurityGroupIds': ([basestring], False),
118 'SubnetId': (basestring, False),
119 'Tags': (Tags, False),
120 'VolumeSizeInGB': (integer, False),
121 }
122
123
124 class CognitoMemberDefinition(AWSProperty):
125 props = {
126 'CognitoClientId': (basestring, True),
127 'CognitoUserGroup': (basestring, True),
128 'CognitoUserPool': (basestring, True),
129 }
130
131
132 class MemberDefinition(AWSProperty):
133 props = {
134 'CognitoMemberDefinition': (CognitoMemberDefinition, True),
135 }
136
137
138 class NotificationConfiguration(AWSProperty):
139 props = {
140 'NotificationTopicArn': (basestring, True),
141 }
142
143
144 class Workteam(AWSObject):
145 resource_type = "AWS::SageMaker::Workteam"
146
147 props = {
148 'Description': (basestring, False),
149 'MemberDefinitions': ([MemberDefinition], False),
150 'NotificationConfiguration': (NotificationConfiguration, False),
151 'Tags': (Tags, False),
152 'WorkteamName': (basestring, False),
153 }
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/sagemaker.py b/troposphere/sagemaker.py
--- a/troposphere/sagemaker.py
+++ b/troposphere/sagemaker.py
@@ -59,6 +59,7 @@
props = {
'ContainerHostname': (basestring, False),
'Environment': (dict, False),
+ 'Mode': (basestring, False),
'ModelDataUrl': (basestring, False),
'Image': (basestring, True)
}
| {"golden_diff": "diff --git a/troposphere/sagemaker.py b/troposphere/sagemaker.py\n--- a/troposphere/sagemaker.py\n+++ b/troposphere/sagemaker.py\n@@ -59,6 +59,7 @@\n props = {\n 'ContainerHostname': (basestring, False),\n 'Environment': (dict, False),\n+ 'Mode': (basestring, False),\n 'ModelDataUrl': (basestring, False),\n 'Image': (basestring, True)\n }\n", "issue": "SageMaker Model ContainerDefinition object does not support attribute Mode\nSetting a `Mode` attribute within the ContainerDefinition for both the `PrimaryContainer` and `Containers` attributes for creating a Model resources keeps throwing error - `AttributeError: ContainerDefinition object does not support attribute Mode`.\r\n\r\nWithin the latest cloudformation docs https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-model-containerdefinition.html the `Mode` attribute is supported.\r\n\r\nWithout this support, multiple models container(s) creates/updates cannot be configured.\r\n\r\nWould you prefer I open a PR or can I wait if it won't take much.\r\n\r\nThanks.\n", "before_files": [{"content": "# Copyright (c) 2012-2018, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\nfrom .validators import integer\n\n\nclass GitConfig(AWSProperty):\n props = {\n 'Branch': (basestring, False),\n 'RepositoryUrl': (basestring, True),\n 'SecretArn': (basestring, False),\n }\n\n\nclass CodeRepository(AWSObject):\n resource_type = \"AWS::SageMaker::CodeRepository\"\n\n props = {\n 'CodeRepositoryName': (basestring, False),\n 'GitConfig': (GitConfig, True)\n }\n\n\nclass Endpoint(AWSObject):\n resource_type = \"AWS::SageMaker::Endpoint\"\n\n props = {\n 'EndpointName': (basestring, False),\n 'EndpointConfigName': (basestring, True),\n 'Tags': (Tags, True)\n }\n\n\nclass ProductionVariant(AWSProperty):\n props = {\n 'ModelName': (basestring, True),\n 'VariantName': (basestring, True),\n 'InitialInstanceCount': (integer, True),\n 'InstanceType': (basestring, True),\n 'InitialVariantWeight': (float, True)\n }\n\n\nclass EndpointConfig(AWSObject):\n resource_type = \"AWS::SageMaker::EndpointConfig\"\n\n props = {\n 'EndpointConfigName': (basestring, False),\n 'ProductionVariants': ([ProductionVariant], True),\n 'KmsKeyId': (basestring, False),\n 'Tags': (Tags, True)\n }\n\n\nclass ContainerDefinition(AWSProperty):\n props = {\n 'ContainerHostname': (basestring, False),\n 'Environment': (dict, False),\n 'ModelDataUrl': (basestring, False),\n 'Image': (basestring, True)\n }\n\n\nclass VpcConfig(AWSProperty):\n props = {\n 'Subnets': ([basestring], True),\n 'SecurityGroupIds': ([basestring], True)\n }\n\n\nclass Model(AWSObject):\n resource_type = \"AWS::SageMaker::Model\"\n\n props = {\n 'Containers': ([ContainerDefinition], False),\n 'ExecutionRoleArn': (basestring, True),\n 'ModelName': (basestring, False),\n 'PrimaryContainer': (ContainerDefinition, False),\n 'Tags': (Tags, False),\n 'VpcConfig': (VpcConfig, False),\n }\n\n\nclass NotebookInstanceLifecycleHook(AWSProperty):\n props = {\n 'Content': (basestring, False)\n }\n\n\nclass NotebookInstanceLifecycleConfig(AWSObject):\n resource_type = \"AWS::SageMaker::NotebookInstanceLifecycleConfig\"\n\n props = {\n 'NotebookInstanceLifecycleConfigName': (basestring, False),\n 'OnCreate': ([NotebookInstanceLifecycleHook], False),\n 'OnStart': ([NotebookInstanceLifecycleHook], False)\n }\n\n\nclass NotebookInstance(AWSObject):\n resource_type = 
\"AWS::SageMaker::NotebookInstance\"\n\n props = {\n 'AcceleratorTypes': ([basestring], False),\n 'AdditionalCodeRepositories': ([basestring], False),\n 'DefaultCodeRepository': (basestring, False),\n 'DirectInternetAccess': (basestring, False),\n 'InstanceType': (basestring, True),\n 'KmsKeyId': (basestring, False),\n 'LifecycleConfigName': (basestring, False),\n 'NotebookInstanceName': (basestring, False),\n 'RoleArn': (basestring, True),\n 'RootAccess': (basestring, False),\n 'SecurityGroupIds': ([basestring], False),\n 'SubnetId': (basestring, False),\n 'Tags': (Tags, False),\n 'VolumeSizeInGB': (integer, False),\n }\n\n\nclass CognitoMemberDefinition(AWSProperty):\n props = {\n 'CognitoClientId': (basestring, True),\n 'CognitoUserGroup': (basestring, True),\n 'CognitoUserPool': (basestring, True),\n }\n\n\nclass MemberDefinition(AWSProperty):\n props = {\n 'CognitoMemberDefinition': (CognitoMemberDefinition, True),\n }\n\n\nclass NotificationConfiguration(AWSProperty):\n props = {\n 'NotificationTopicArn': (basestring, True),\n }\n\n\nclass Workteam(AWSObject):\n resource_type = \"AWS::SageMaker::Workteam\"\n\n props = {\n 'Description': (basestring, False),\n 'MemberDefinitions': ([MemberDefinition], False),\n 'NotificationConfiguration': (NotificationConfiguration, False),\n 'Tags': (Tags, False),\n 'WorkteamName': (basestring, False),\n }\n", "path": "troposphere/sagemaker.py"}], "after_files": [{"content": "# Copyright (c) 2012-2018, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\nfrom .validators import integer\n\n\nclass GitConfig(AWSProperty):\n props = {\n 'Branch': (basestring, False),\n 'RepositoryUrl': (basestring, True),\n 'SecretArn': (basestring, False),\n }\n\n\nclass CodeRepository(AWSObject):\n resource_type = \"AWS::SageMaker::CodeRepository\"\n\n props = {\n 'CodeRepositoryName': (basestring, False),\n 'GitConfig': (GitConfig, True)\n }\n\n\nclass Endpoint(AWSObject):\n resource_type = \"AWS::SageMaker::Endpoint\"\n\n props = {\n 'EndpointName': (basestring, False),\n 'EndpointConfigName': (basestring, True),\n 'Tags': (Tags, True)\n }\n\n\nclass ProductionVariant(AWSProperty):\n props = {\n 'ModelName': (basestring, True),\n 'VariantName': (basestring, True),\n 'InitialInstanceCount': (integer, True),\n 'InstanceType': (basestring, True),\n 'InitialVariantWeight': (float, True)\n }\n\n\nclass EndpointConfig(AWSObject):\n resource_type = \"AWS::SageMaker::EndpointConfig\"\n\n props = {\n 'EndpointConfigName': (basestring, False),\n 'ProductionVariants': ([ProductionVariant], True),\n 'KmsKeyId': (basestring, False),\n 'Tags': (Tags, True)\n }\n\n\nclass ContainerDefinition(AWSProperty):\n props = {\n 'ContainerHostname': (basestring, False),\n 'Environment': (dict, False),\n 'Mode': (basestring, False),\n 'ModelDataUrl': (basestring, False),\n 'Image': (basestring, True)\n }\n\n\nclass VpcConfig(AWSProperty):\n props = {\n 'Subnets': ([basestring], True),\n 'SecurityGroupIds': ([basestring], True)\n }\n\n\nclass Model(AWSObject):\n resource_type = \"AWS::SageMaker::Model\"\n\n props = {\n 'Containers': ([ContainerDefinition], False),\n 'ExecutionRoleArn': (basestring, True),\n 'ModelName': (basestring, False),\n 'PrimaryContainer': (ContainerDefinition, False),\n 'Tags': (Tags, False),\n 'VpcConfig': (VpcConfig, False),\n }\n\n\nclass NotebookInstanceLifecycleHook(AWSProperty):\n props = {\n 'Content': (basestring, False)\n }\n\n\nclass 
NotebookInstanceLifecycleConfig(AWSObject):\n resource_type = \"AWS::SageMaker::NotebookInstanceLifecycleConfig\"\n\n props = {\n 'NotebookInstanceLifecycleConfigName': (basestring, False),\n 'OnCreate': ([NotebookInstanceLifecycleHook], False),\n 'OnStart': ([NotebookInstanceLifecycleHook], False)\n }\n\n\nclass NotebookInstance(AWSObject):\n resource_type = \"AWS::SageMaker::NotebookInstance\"\n\n props = {\n 'AcceleratorTypes': ([basestring], False),\n 'AdditionalCodeRepositories': ([basestring], False),\n 'DefaultCodeRepository': (basestring, False),\n 'DirectInternetAccess': (basestring, False),\n 'InstanceType': (basestring, True),\n 'KmsKeyId': (basestring, False),\n 'LifecycleConfigName': (basestring, False),\n 'NotebookInstanceName': (basestring, False),\n 'RoleArn': (basestring, True),\n 'RootAccess': (basestring, False),\n 'SecurityGroupIds': ([basestring], False),\n 'SubnetId': (basestring, False),\n 'Tags': (Tags, False),\n 'VolumeSizeInGB': (integer, False),\n }\n\n\nclass CognitoMemberDefinition(AWSProperty):\n props = {\n 'CognitoClientId': (basestring, True),\n 'CognitoUserGroup': (basestring, True),\n 'CognitoUserPool': (basestring, True),\n }\n\n\nclass MemberDefinition(AWSProperty):\n props = {\n 'CognitoMemberDefinition': (CognitoMemberDefinition, True),\n }\n\n\nclass NotificationConfiguration(AWSProperty):\n props = {\n 'NotificationTopicArn': (basestring, True),\n }\n\n\nclass Workteam(AWSObject):\n resource_type = \"AWS::SageMaker::Workteam\"\n\n props = {\n 'Description': (basestring, False),\n 'MemberDefinitions': ([MemberDefinition], False),\n 'NotificationConfiguration': (NotificationConfiguration, False),\n 'Tags': (Tags, False),\n 'WorkteamName': (basestring, False),\n }\n", "path": "troposphere/sagemaker.py"}]} | 1,790 | 111 |
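As an illustrative aside on what the patch in the record above enables, here is a minimal troposphere sketch. The image URI, role ARN, and S3 path are placeholders invented for the example, and it assumes a troposphere build that includes the new optional `Mode` property on `ContainerDefinition`.

```python
# Hypothetical usage sketch; ARNs, URIs, and resource names are placeholders.
from troposphere import Template
from troposphere.sagemaker import ContainerDefinition, Model

template = Template()

container = ContainerDefinition(
    Image="123456789012.dkr.ecr.us-east-1.amazonaws.com/demo:latest",  # placeholder
    ModelDataUrl="s3://demo-bucket/models/",                           # placeholder
    Mode="MultiModel",  # property added by the patch; unpatched releases raise AttributeError here
)

template.add_resource(
    Model(
        "DemoModel",
        ExecutionRoleArn="arn:aws:iam::123456789012:role/DemoSageMakerRole",  # placeholder
        PrimaryContainer=container,
    )
)

print(template.to_json())
```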
gh_patches_debug_10482 | rasdani/github-patches | git_diff | encode__httpx-737 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Version 0.11.0
This one is a big deal, since it reintroduces the sync client, and is essentially a 1.0 pre-release in terms of how the API looks.
```python
>>> import httpx
>>> httpx.get('https://www.example.com')
<Response [200 OK]>
```
🎉✨ **TA-DA!** ✨🎉
---
# Release notes
## 0.11.0 (January 9th, 2019)
The 0.11 release reintroduces our sync support, so that `httpx` now supports both a standard thread-concurrency API, and an async API.
Existing async `httpx` users that are upgrading to 0.11 should ensure that:
* Async codebases should always use a client instance to make requests, instead of the top-level API.
* The async client is named as `httpx.AsyncClient()`, instead of `httpx.Client()`.
* When instantiating proxy configurations use the `httpx.Proxy()` class, instead of the previous `httpx.HTTPProxy()`. This new configuration class works for configuring both sync and async clients.
We believe the API is now pretty much stable, and are aiming for a 1.0 release sometime on or before April 2020.
### Changed
- Top level API such as `httpx.get(url, ...)`, `httpx.post(url, ...)`, `httpx.request(method, url, ...)` becomes synchronous.
- Added `httpx.Client()` for synchronous clients, with `httpx.AsyncClient` being used for async clients.
- Switched to `proxies=httpx.Proxy(...)` for proxy configuration.
- Network connection errors are wrapped in `httpx.NetworkError`, rather than exposing lower-level exception types directly.
### Removed
- The `request.url.origin` property and `httpx.Origin` class are no longer available.
- The per-request `cert`, `verify`, and `trust_env` arguments are escalated from raising errors if used, to no longer being available. These arguments should be used on a per-client instance instead, or in the top-level API.
- The `stream` argument has escalated from raising an error when used, to no longer being available. Use the `client.stream(...)` or `httpx.stream()` streaming API instead.
### Fixed
- Redirect loop detection matches against `(method, url)` rather than `url`. (Pull #734)
---
# What's next...
I'd expect that we'll likely end up waiting for a period of time after this release, and then end up releasing a 1.0 with either no API changes, or only very minimal API changes. (The only remaining area I can see us still wanting to refine/change, would be some review making sure we've got an exception hierarchy/naming that we're entirely happy to stick with for 1.0 onwards)
---
# Checklist
- [x] Reintroduce `Client` as a sync client. #735
- [x] Reintroduce `WSGIDispatch`. #735
- [x] Top-level API becomes sync, not async. #735
- [x] Drop `Origin` from public API. #688
- [x] Use `httpx.Proxy()` for proxy configuration, not the `httpx.HTTPProxy` dispatch class. #713
- [ ] ~Consider switching `client.params`, `client.headers`, `client.cookies` so that they don't have a setter/getter mismatch.~ Refs #678 #274
- [ ] ~Consider dropping UDS support.~ #723
- [x] Wrap IO Exceptions in httpx exceptions. #707
- [x] Docs #727
- [x] `httpx.Auth` becomes public API. #732 #731
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import re
5 from pathlib import Path
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 version = Path(package, "__version__.py").read_text()
15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", version).group(1)
16
17
18 def get_long_description():
19 """
20 Return the README.
21 """
22 long_description = ""
23 with open("README.md", encoding="utf8") as f:
24 long_description += f.read()
25 long_description += "\n\n"
26 with open("CHANGELOG.md", encoding="utf8") as f:
27 long_description += f.read()
28 return long_description
29
30
31 def get_packages(package):
32 """
33 Return root package and all sub-packages.
34 """
35 return [str(path.parent) for path in Path(package).glob("**/__init__.py")]
36
37
38 setup(
39 name="httpx",
40 python_requires=">=3.6",
41 version=get_version("httpx"),
42 url="https://github.com/encode/httpx",
43 license="BSD",
44 description="The next generation HTTP client.",
45 long_description=get_long_description(),
46 long_description_content_type="text/markdown",
47 author="Tom Christie",
48 author_email="[email protected]",
49 package_data={"httpx": ["py.typed"]},
50 packages=get_packages("httpx"),
51 include_package_data=True,
52 zip_safe=False,
53 install_requires=[
54 "certifi",
55 "hstspreload",
56 "chardet==3.*",
57 "h11>=0.8,<0.10",
58 "h2==3.*",
59 "idna==2.*",
60 "rfc3986>=1.3,<2",
61 "sniffio==1.*",
62 "urllib3==1.*",
63 ],
64 classifiers=[
65 "Development Status :: 3 - Alpha",
66 "Environment :: Web Environment",
67 "Intended Audience :: Developers",
68 "License :: OSI Approved :: BSD License",
69 "Operating System :: OS Independent",
70 "Topic :: Internet :: WWW/HTTP",
71 "Framework :: AsyncIO",
72 "Framework :: Trio",
73 "Programming Language :: Python :: 3",
74 "Programming Language :: Python :: 3.6",
75 "Programming Language :: Python :: 3.7",
76 "Programming Language :: Python :: 3.8",
77 ],
78 )
79
```
Path: `httpx/__version__.py`
Content:
```
1 __title__ = "httpx"
2 __description__ = "A next generation HTTP client, for Python 3."
3 __version__ = "0.10.1"
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/__version__.py b/httpx/__version__.py
--- a/httpx/__version__.py
+++ b/httpx/__version__.py
@@ -1,3 +1,3 @@
__title__ = "httpx"
__description__ = "A next generation HTTP client, for Python 3."
-__version__ = "0.10.1"
+__version__ = "0.11.0"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -62,7 +62,7 @@
"urllib3==1.*",
],
classifiers=[
- "Development Status :: 3 - Alpha",
+ "Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
| {"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.10.1\"\n+__version__ = \"0.11.0\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -62,7 +62,7 @@\n \"urllib3==1.*\",\n ],\n classifiers=[\n- \"Development Status :: 3 - Alpha\",\n+ \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n", "issue": "Version 0.11.0\nThis one is a big deal, since it reintroduces the sync client, and is essentially a 1.0 pre-release in terms of how the API looks.\r\n\r\n```python\r\n>>> import httpx\r\n>>> httpx.get('https://www.example.com')\r\n<Response [200 OK]>\r\n```\r\n\r\n\ud83c\udf89\u2728 **TA-DA!** \u2728\ud83c\udf89\r\n\r\n---\r\n\r\n# Release notes\r\n\r\n## 0.11.0 (January 9th, 2019)\r\n\r\nThe 0.11 release reintroduces our sync support, so that `httpx` now supports both a standard thread-concurrency API, and an async API.\r\n\r\nExisting async `httpx` users that are upgrading to 0.11 should ensure that:\r\n\r\n* Async codebases should always use a client instance to make requests, instead of the top-level API.\r\n* The async client is named as `httpx.AsyncClient()`, instead of `httpx.Client()`.\r\n* When instantiating proxy configurations use the `httpx.Proxy()` class, instead of the previous `httpx.HTTPProxy()`. This new configuration class works for configuring both sync and async clients.\r\n\r\nWe believe the API is now pretty much stable, and are aiming for a 1.0 release sometime on or before April 2020.\r\n\r\n### Changed\r\n\r\n- Top level API such as `httpx.get(url, ...)`, `httpx.post(url, ...)`, `httpx.request(method, url, ...)` becomes synchronous.\r\n- Added `httpx.Client()` for synchronous clients, with `httpx.AsyncClient` being used for async clients.\r\n- Switched to `proxies=httpx.Proxy(...)` for proxy configuration.\r\n- Network connection errors are wrapped in `httpx.NetworkError`, rather than exposing lower-level exception types directly.\r\n\r\n### Removed\r\n\r\n- The `request.url.origin` property and `httpx.Origin` class are no longer available.\r\n- The per-request `cert`, `verify`, and `trust_env` arguments are escalated from raising errors if used, to no longer being available. These arguments should be used on a per-client instance instead, or in the top-level API.\r\n- The `stream` argument has escalated from raising an error when used, to no longer being available. Use the `client.stream(...)` or `httpx.stream()` streaming API instead.\r\n\r\n### Fixed\r\n\r\n- Redirect loop detection matches against `(method, url)` rather than `url`. (Pull #734)\r\n\r\n---\r\n\r\n# What's next...\r\n\r\nI'd expect that we'll likely end up waiting for a period of time after this release, and then end up releasing a 1.0 with either no API changes, or only very minimal API changes. (The only remaining area I can see us still wanting to refine/change, would be some review making sure we've got an exception heirarchy/naming that we're entirely happy to stick with for 1.0 onwards)\r\n\r\n---\r\n\r\n# Checklist\r\n\r\n- [x] Reintroduce `Client` as a sync client. #735\r\n- [x] Reintroduce `WSGIDispatch`. #735\r\n- [x] Top-level API becomes sync, not async. #735\r\n- [x] Drop `Origin` from public API. 
#688\r\n- [x] Use `httpx.Proxy()` for proxy configuration, not the `httpx.HTTPProxy` dispatch class. #713\r\n- [ ] ~Consider switching `client.params`, `client.headers`, `client.cookies` so that they don't have a setter/getter mismatch.~ Refs #678 #274\r\n- [ ] ~Consider dropping UDS support.~ #723\r\n- [x] Wrap IO Exceptions in httpx exceptions. #707\r\n- [x] Docs #727\r\n- [x] `httpx.Auth` becomes public API. #732 #731\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"hstspreload\",\n \"chardet==3.*\",\n \"h11>=0.8,<0.10\",\n \"h2==3.*\",\n \"idna==2.*\",\n \"rfc3986>=1.3,<2\",\n \"sniffio==1.*\",\n \"urllib3==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}, {"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.10.1\"\n", "path": "httpx/__version__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return 
[str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"hstspreload\",\n \"chardet==3.*\",\n \"h11>=0.8,<0.10\",\n \"h2==3.*\",\n \"idna==2.*\",\n \"rfc3986>=1.3,<2\",\n \"sniffio==1.*\",\n \"urllib3==1.*\",\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}, {"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.11.0\"\n", "path": "httpx/__version__.py"}]} | 1,830 | 189 |
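A quick sketch of the sync/async split described in the release notes of the record above (assuming httpx >= 0.11; the example.com URL is just a placeholder target):

```python
# Sketch of the 0.11-style API surface.
import asyncio
import httpx

# Synchronous: the top-level helpers and httpx.Client() are thread-concurrency APIs.
print(httpx.get("https://www.example.com"))  # <Response [200 OK]>

with httpx.Client() as client:
    print(client.get("https://www.example.com").status_code)

# Asynchronous: always go through an AsyncClient instance, never the top-level API.
async def main() -> None:
    async with httpx.AsyncClient() as client:
        response = await client.get("https://www.example.com")
        print(response.status_code)

asyncio.run(main())
```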
gh_patches_debug_8703 | rasdani/github-patches | git_diff | svthalia__concrexit-1836 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeviceListView permission not checked
### Describe the bug
The `DeviceListView` of `api/v2` has a `IsAuthenticatedOwnerOrReadOnly` permission which is never checked as `get_object` is not used in the view.
### How to reproduce
Steps to reproduce the behaviour:
1. Set a breakpoint in the `IsAuthenticatedOwnerOrReadOnly` class
2. Enable the debugger
3. See that the `has_object_permission` method is not called on a request to the corresponding endpoint
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/pushnotifications/api/v2/views.py`
Content:
```
1 from django.utils.translation import get_language_from_request
2 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
3 from rest_framework.filters import OrderingFilter
4 from rest_framework.generics import (
5 ListAPIView,
6 RetrieveAPIView,
7 CreateAPIView,
8 UpdateAPIView,
9 )
10
11 from pushnotifications.api.v2.filters import CategoryFilter
12 from pushnotifications.api.v2.permissions import IsAuthenticatedOwnerOrReadOnly
13 from pushnotifications.api.v2.serializers import (
14 DeviceSerializer,
15 MessageSerializer,
16 CategorySerializer,
17 )
18 from pushnotifications.models import Device, Category, Message
19 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod
20
21
22 class DeviceListView(ListAPIView, CreateAPIView):
23 """Returns an overview of all devices that are owner by the user."""
24
25 permission_classes = [
26 IsAuthenticatedOrTokenHasScopeForMethod,
27 IsAuthenticatedOwnerOrReadOnly,
28 ]
29 serializer_class = DeviceSerializer
30 queryset = Device.objects.all()
31 required_scopes_per_method = {
32 "GET": ["pushnotifications:read"],
33 "POST": ["pushnotifications:write"],
34 }
35
36 def get_queryset(self):
37 if self.request.user:
38 return Device.objects.filter(user=self.request.user)
39 return super().get_queryset()
40
41 def perform_create(self, serializer):
42 language = get_language_from_request(self.request)
43
44 try:
45 serializer.instance = Device.objects.get(
46 user=self.request.user,
47 registration_id=serializer.validated_data["registration_id"],
48 )
49 except Device.DoesNotExist:
50 pass
51
52 data = serializer.validated_data
53 categories = [c.pk for c in Category.objects.all()]
54 if "receive_category" in data and len(data["receive_category"]) > 0:
55 categories = data["receive_category"] + ["general"]
56
57 serializer.save(
58 user=self.request.user, language=language, receive_category=categories
59 )
60
61
62 class DeviceDetailView(RetrieveAPIView, UpdateAPIView):
63 """Returns details of a device."""
64
65 permission_classes = [
66 IsAuthenticatedOrTokenHasScope,
67 IsAuthenticatedOwnerOrReadOnly,
68 ]
69 serializer_class = DeviceSerializer
70 required_scopes = ["pushnotifications:read", "pushnotifications:write"]
71 queryset = Device.objects.all()
72
73 def perform_update(self, serializer):
74 serializer.save(user=self.request.user)
75
76
77 class CategoryListView(ListAPIView):
78 """Returns an overview of all available categories for push notifications."""
79
80 serializer_class = CategorySerializer
81 queryset = Category.objects.all()
82 required_scopes = ["pushnotifications:read"]
83
84
85 class MessageListView(ListAPIView):
86 """Returns a list of message sent to the user."""
87
88 serializer_class = MessageSerializer
89 required_scopes = ["pushnotifications:read"]
90 permission_classes = [
91 IsAuthenticatedOrTokenHasScope,
92 ]
93 filter_backends = (OrderingFilter, CategoryFilter)
94 ordering_fields = ("sent",)
95
96 def get_queryset(self):
97 if self.request.user:
98 return Message.all_objects.filter(users=self.request.user)
99 return Message.all_objects.all()
100
101
102 class MessageDetailView(RetrieveAPIView):
103 """Returns a message."""
104
105 serializer_class = MessageSerializer
106 required_scopes = ["pushnotifications:read"]
107 permission_classes = [
108 IsAuthenticatedOrTokenHasScope,
109 ]
110
111 def get_queryset(self):
112 if self.request.user:
113 return Message.all_objects.filter(users=self.request.user)
114 return Message.all_objects.all()
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/pushnotifications/api/v2/views.py b/website/pushnotifications/api/v2/views.py
--- a/website/pushnotifications/api/v2/views.py
+++ b/website/pushnotifications/api/v2/views.py
@@ -22,10 +22,7 @@
class DeviceListView(ListAPIView, CreateAPIView):
"""Returns an overview of all devices that are owner by the user."""
- permission_classes = [
- IsAuthenticatedOrTokenHasScopeForMethod,
- IsAuthenticatedOwnerOrReadOnly,
- ]
+ permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]
serializer_class = DeviceSerializer
queryset = Device.objects.all()
required_scopes_per_method = {
| {"golden_diff": "diff --git a/website/pushnotifications/api/v2/views.py b/website/pushnotifications/api/v2/views.py\n--- a/website/pushnotifications/api/v2/views.py\n+++ b/website/pushnotifications/api/v2/views.py\n@@ -22,10 +22,7 @@\n class DeviceListView(ListAPIView, CreateAPIView):\n \"\"\"Returns an overview of all devices that are owner by the user.\"\"\"\n \n- permission_classes = [\n- IsAuthenticatedOrTokenHasScopeForMethod,\n- IsAuthenticatedOwnerOrReadOnly,\n- ]\n+ permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]\n serializer_class = DeviceSerializer\n queryset = Device.objects.all()\n required_scopes_per_method = {\n", "issue": "DeviceListView permission not checked\n### Describe the bug\r\nThe `DeviceListView` of `api/v2` has a `IsAuthenticatedOwnerOrReadOnly` permission which is never checked as `get_object` is not used in the view.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Set a breakpoint in the `IsAuthenticatedOwnerOrReadOnly` class\r\n2. Enable the debugger\r\n3. See that the `has_object_permission` method is not called on a request to the corresponding endpoint\r\n\n", "before_files": [{"content": "from django.utils.translation import get_language_from_request\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.filters import OrderingFilter\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n CreateAPIView,\n UpdateAPIView,\n)\n\nfrom pushnotifications.api.v2.filters import CategoryFilter\nfrom pushnotifications.api.v2.permissions import IsAuthenticatedOwnerOrReadOnly\nfrom pushnotifications.api.v2.serializers import (\n DeviceSerializer,\n MessageSerializer,\n CategorySerializer,\n)\nfrom pushnotifications.models import Device, Category, Message\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass DeviceListView(ListAPIView, CreateAPIView):\n \"\"\"Returns an overview of all devices that are owner by the user.\"\"\"\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n IsAuthenticatedOwnerOrReadOnly,\n ]\n serializer_class = DeviceSerializer\n queryset = Device.objects.all()\n required_scopes_per_method = {\n \"GET\": [\"pushnotifications:read\"],\n \"POST\": [\"pushnotifications:write\"],\n }\n\n def get_queryset(self):\n if self.request.user:\n return Device.objects.filter(user=self.request.user)\n return super().get_queryset()\n\n def perform_create(self, serializer):\n language = get_language_from_request(self.request)\n\n try:\n serializer.instance = Device.objects.get(\n user=self.request.user,\n registration_id=serializer.validated_data[\"registration_id\"],\n )\n except Device.DoesNotExist:\n pass\n\n data = serializer.validated_data\n categories = [c.pk for c in Category.objects.all()]\n if \"receive_category\" in data and len(data[\"receive_category\"]) > 0:\n categories = data[\"receive_category\"] + [\"general\"]\n\n serializer.save(\n user=self.request.user, language=language, receive_category=categories\n )\n\n\nclass DeviceDetailView(RetrieveAPIView, UpdateAPIView):\n \"\"\"Returns details of a device.\"\"\"\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n IsAuthenticatedOwnerOrReadOnly,\n ]\n serializer_class = DeviceSerializer\n required_scopes = [\"pushnotifications:read\", \"pushnotifications:write\"]\n queryset = Device.objects.all()\n\n def perform_update(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass CategoryListView(ListAPIView):\n 
\"\"\"Returns an overview of all available categories for push notifications.\"\"\"\n\n serializer_class = CategorySerializer\n queryset = Category.objects.all()\n required_scopes = [\"pushnotifications:read\"]\n\n\nclass MessageListView(ListAPIView):\n \"\"\"Returns a list of message sent to the user.\"\"\"\n\n serializer_class = MessageSerializer\n required_scopes = [\"pushnotifications:read\"]\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n filter_backends = (OrderingFilter, CategoryFilter)\n ordering_fields = (\"sent\",)\n\n def get_queryset(self):\n if self.request.user:\n return Message.all_objects.filter(users=self.request.user)\n return Message.all_objects.all()\n\n\nclass MessageDetailView(RetrieveAPIView):\n \"\"\"Returns a message.\"\"\"\n\n serializer_class = MessageSerializer\n required_scopes = [\"pushnotifications:read\"]\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n\n def get_queryset(self):\n if self.request.user:\n return Message.all_objects.filter(users=self.request.user)\n return Message.all_objects.all()\n", "path": "website/pushnotifications/api/v2/views.py"}], "after_files": [{"content": "from django.utils.translation import get_language_from_request\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.filters import OrderingFilter\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n CreateAPIView,\n UpdateAPIView,\n)\n\nfrom pushnotifications.api.v2.filters import CategoryFilter\nfrom pushnotifications.api.v2.permissions import IsAuthenticatedOwnerOrReadOnly\nfrom pushnotifications.api.v2.serializers import (\n DeviceSerializer,\n MessageSerializer,\n CategorySerializer,\n)\nfrom pushnotifications.models import Device, Category, Message\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass DeviceListView(ListAPIView, CreateAPIView):\n \"\"\"Returns an overview of all devices that are owner by the user.\"\"\"\n\n permission_classes = [IsAuthenticatedOrTokenHasScopeForMethod]\n serializer_class = DeviceSerializer\n queryset = Device.objects.all()\n required_scopes_per_method = {\n \"GET\": [\"pushnotifications:read\"],\n \"POST\": [\"pushnotifications:write\"],\n }\n\n def get_queryset(self):\n if self.request.user:\n return Device.objects.filter(user=self.request.user)\n return super().get_queryset()\n\n def perform_create(self, serializer):\n language = get_language_from_request(self.request)\n\n try:\n serializer.instance = Device.objects.get(\n user=self.request.user,\n registration_id=serializer.validated_data[\"registration_id\"],\n )\n except Device.DoesNotExist:\n pass\n\n data = serializer.validated_data\n categories = [c.pk for c in Category.objects.all()]\n if \"receive_category\" in data and len(data[\"receive_category\"]) > 0:\n categories = data[\"receive_category\"] + [\"general\"]\n\n serializer.save(\n user=self.request.user, language=language, receive_category=categories\n )\n\n\nclass DeviceDetailView(RetrieveAPIView, UpdateAPIView):\n \"\"\"Returns details of a device.\"\"\"\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n IsAuthenticatedOwnerOrReadOnly,\n ]\n serializer_class = DeviceSerializer\n required_scopes = [\"pushnotifications:read\", \"pushnotifications:write\"]\n queryset = Device.objects.all()\n\n def perform_update(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass CategoryListView(ListAPIView):\n \"\"\"Returns an overview of all available categories 
for push notifications.\"\"\"\n\n serializer_class = CategorySerializer\n queryset = Category.objects.all()\n required_scopes = [\"pushnotifications:read\"]\n\n\nclass MessageListView(ListAPIView):\n \"\"\"Returns a list of message sent to the user.\"\"\"\n\n serializer_class = MessageSerializer\n required_scopes = [\"pushnotifications:read\"]\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n filter_backends = (OrderingFilter, CategoryFilter)\n ordering_fields = (\"sent\",)\n\n def get_queryset(self):\n if self.request.user:\n return Message.all_objects.filter(users=self.request.user)\n return Message.all_objects.all()\n\n\nclass MessageDetailView(RetrieveAPIView):\n \"\"\"Returns a message.\"\"\"\n\n serializer_class = MessageSerializer\n required_scopes = [\"pushnotifications:read\"]\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n\n def get_queryset(self):\n if self.request.user:\n return Message.all_objects.filter(users=self.request.user)\n return Message.all_objects.all()\n", "path": "website/pushnotifications/api/v2/views.py"}]} | 1,325 | 155 |
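The Django REST Framework behaviour underlying the record above can be shown with a small generic sketch (hypothetical `IsOwner` class, not project code): `has_object_permission` only runs when a view calls `self.get_object()`, which `ListAPIView`/`CreateAPIView` never do, so list endpoints must restrict data in `get_queryset()` instead — which is why the patch simply drops the dead permission check.

```python
# Generic DRF sketch illustrating why object-level permissions are skipped on list views.
from rest_framework import permissions


class IsOwner(permissions.BasePermission):
    def has_object_permission(self, request, view, obj):
        # Invoked via check_object_permissions(), which generic views call inside
        # get_object() (Retrieve/Update/Destroy). ListAPIView and CreateAPIView never
        # call get_object(), so this method is never reached on a list endpoint.
        return obj.user == request.user
```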
gh_patches_debug_20327 | rasdani/github-patches | git_diff | pypi__warehouse-10438 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use natural sort order for file listings
**What's the problem this feature will solve?**
Currently on https://pypi.org/project/lxml/4.6.3/#files, the files are listed as:
- lxml-4.6.3-cp27-cp27mu-manylinux1_x86_64.whl
- lxml-4.6.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl
- lxml-4.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl
- lxml-4.6.3-cp35-cp35m-manylinux1_i686.whl
This is because the strings are sorted as 27 < 310 < 35, for strings.
**Describe the solution you'd like**
Use natural sorting order for filenames, similar to what we did for https://github.com/pypa/trove-classifiers/issues/56.
This _may_ also make sense for the simple pages, where it would be a nice-to-have when a human looks at the page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/packaging/views.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
14 from pyramid.view import view_config
15 from sqlalchemy.orm.exc import NoResultFound
16
17 from warehouse.accounts.models import User
18 from warehouse.cache.origin import origin_cache
19 from warehouse.packaging.models import Project, Release, Role
20 from warehouse.utils import readme
21
22
23 @view_config(
24 route_name="packaging.project",
25 context=Project,
26 renderer="packaging/detail.html",
27 decorator=[
28 origin_cache(
29 1 * 24 * 60 * 60, stale_if_error=5 * 24 * 60 * 60 # 1 day, 5 days stale
30 )
31 ],
32 has_translations=True,
33 )
34 def project_detail(project, request):
35 if project.name != request.matchdict.get("name", project.name):
36 return HTTPMovedPermanently(request.current_route_path(name=project.name))
37
38 try:
39 release = (
40 request.db.query(Release)
41 .filter(Release.project == project)
42 .order_by(
43 Release.yanked,
44 Release.is_prerelease.nullslast(),
45 Release._pypi_ordering.desc(),
46 )
47 .limit(1)
48 .one()
49 )
50 except NoResultFound:
51 raise HTTPNotFound
52
53 return release_detail(release, request)
54
55
56 @view_config(
57 route_name="packaging.release",
58 context=Release,
59 renderer="packaging/detail.html",
60 decorator=[
61 origin_cache(
62 1 * 24 * 60 * 60, stale_if_error=5 * 24 * 60 * 60 # 1 day, 5 days stale
63 )
64 ],
65 has_translations=True,
66 )
67 def release_detail(release, request):
68 project = release.project
69
70 # Check if the requested version is equivalent but not exactly the same as
71 # the release's version. Use `.get` because this view is used by
72 # `project_detail` and there may not be a version.
73 #
74 # This also handles the case where both the version and the project name
75 # need adjusted, and handles it in a single redirect.
76 if release.version != request.matchdict.get("version", release.version):
77 return HTTPMovedPermanently(
78 request.current_route_path(name=project.name, version=release.version)
79 )
80
81 # It's possible that the requested version was correct (or not provided),
82 # but we still need to adjust the project name.
83 if project.name != request.matchdict.get("name", project.name):
84 return HTTPMovedPermanently(request.current_route_path(name=project.name))
85
86 # Grab the rendered description if it exists, and if it doesn't, then we will render
87 # it inline.
88 # TODO: Remove the fallback to rendering inline and only support displaying the
89 # already rendered content.
90 if release.description.html:
91 description = release.description.html
92 else:
93 description = readme.render(
94 release.description.raw, release.description.content_type
95 )
96
97 # Get all of the maintainers for this project.
98 maintainers = [
99 r.user
100 for r in (
101 request.db.query(Role)
102 .join(User)
103 .filter(Role.project == project)
104 .distinct(User.username)
105 .order_by(User.username)
106 .all()
107 )
108 ]
109
110 # Get the license from both the `Classifier` and `License` metadata fields
111 license_classifiers = ", ".join(
112 c.split(" :: ")[-1] for c in release.classifiers if c.startswith("License")
113 )
114
115 # Make a best effort when the entire license text is given by using the
116 # first line only.
117 short_license = release.license.split("\n")[0] if release.license else None
118
119 if license_classifiers and short_license:
120 license = f"{license_classifiers} ({short_license})"
121 else:
122 license = license_classifiers or short_license or None
123
124 return {
125 "project": project,
126 "release": release,
127 "description": description,
128 "files": release.files.all(),
129 "latest_version": project.latest_version,
130 "all_versions": project.all_versions,
131 "maintainers": maintainers,
132 "license": license,
133 }
134
135
136 @view_config(
137 route_name="includes.edit-project-button",
138 context=Project,
139 renderer="includes/manage-project-button.html",
140 uses_session=True,
141 permission="manage:project",
142 has_translations=True,
143 )
144 def edit_project_button(project, request):
145 return {"project": project}
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/packaging/views.py b/warehouse/packaging/views.py
--- a/warehouse/packaging/views.py
+++ b/warehouse/packaging/views.py
@@ -10,6 +10,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from natsort import natsorted
from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
@@ -125,7 +126,8 @@
"project": project,
"release": release,
"description": description,
- "files": release.files.all(),
+ # We cannot easily sort naturally in SQL, sort here and pass to template
+ "files": natsorted(release.files.all(), reverse=True, key=lambda f: f.filename),
"latest_version": project.latest_version,
"all_versions": project.all_versions,
"maintainers": maintainers,
| {"golden_diff": "diff --git a/warehouse/packaging/views.py b/warehouse/packaging/views.py\n--- a/warehouse/packaging/views.py\n+++ b/warehouse/packaging/views.py\n@@ -10,6 +10,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from natsort import natsorted\n from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\n from pyramid.view import view_config\n from sqlalchemy.orm.exc import NoResultFound\n@@ -125,7 +126,8 @@\n \"project\": project,\n \"release\": release,\n \"description\": description,\n- \"files\": release.files.all(),\n+ # We cannot easily sort naturally in SQL, sort here and pass to template\n+ \"files\": natsorted(release.files.all(), reverse=True, key=lambda f: f.filename),\n \"latest_version\": project.latest_version,\n \"all_versions\": project.all_versions,\n \"maintainers\": maintainers,\n", "issue": "Use natural sort order for file listings\n**What's the problem this feature will solve?**\r\n\r\nCurrently on https://pypi.org/project/lxml/4.6.3/#files, the files are listed as:\r\n\r\n- lxml-4.6.3-cp27-cp27mu-manylinux1_x86_64.whl\r\n- lxml-4.6.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl\r\n- lxml-4.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl\r\n- lxml-4.6.3-cp35-cp35m-manylinux1_i686.whl\r\n\r\nThis is because the strings are sorted as 27 < 310 < 35, for strings.\r\n\r\n**Describe the solution you'd like**\r\n\r\nUse natural sorting order for filenames, similar to what we did for https://github.com/pypa/trove-classifiers/issues/56.\r\n\r\nThis _may_ also make sense for the simple pages, where it would be a nice-to-have when a human looks at the page.\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import Project, Release, Role\nfrom warehouse.utils import readme\n\n\n@view_config(\n route_name=\"packaging.project\",\n context=Project,\n renderer=\"packaging/detail.html\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, stale_if_error=5 * 24 * 60 * 60 # 1 day, 5 days stale\n )\n ],\n has_translations=True,\n)\ndef project_detail(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(request.current_route_path(name=project.name))\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project)\n .order_by(\n Release.yanked,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .limit(1)\n .one()\n )\n except NoResultFound:\n raise HTTPNotFound\n\n return release_detail(release, request)\n\n\n@view_config(\n route_name=\"packaging.release\",\n context=Release,\n renderer=\"packaging/detail.html\",\n 
decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, stale_if_error=5 * 24 * 60 * 60 # 1 day, 5 days stale\n )\n ],\n has_translations=True,\n)\ndef release_detail(release, request):\n project = release.project\n\n # Check if the requested version is equivalent but not exactly the same as\n # the release's version. Use `.get` because this view is used by\n # `project_detail` and there may not be a version.\n #\n # This also handles the case where both the version and the project name\n # need adjusted, and handles it in a single redirect.\n if release.version != request.matchdict.get(\"version\", release.version):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name, version=release.version)\n )\n\n # It's possible that the requested version was correct (or not provided),\n # but we still need to adjust the project name.\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(request.current_route_path(name=project.name))\n\n # Grab the rendered description if it exists, and if it doesn't, then we will render\n # it inline.\n # TODO: Remove the fallback to rendering inline and only support displaying the\n # already rendered content.\n if release.description.html:\n description = release.description.html\n else:\n description = readme.render(\n release.description.raw, release.description.content_type\n )\n\n # Get all of the maintainers for this project.\n maintainers = [\n r.user\n for r in (\n request.db.query(Role)\n .join(User)\n .filter(Role.project == project)\n .distinct(User.username)\n .order_by(User.username)\n .all()\n )\n ]\n\n # Get the license from both the `Classifier` and `License` metadata fields\n license_classifiers = \", \".join(\n c.split(\" :: \")[-1] for c in release.classifiers if c.startswith(\"License\")\n )\n\n # Make a best effort when the entire license text is given by using the\n # first line only.\n short_license = release.license.split(\"\\n\")[0] if release.license else None\n\n if license_classifiers and short_license:\n license = f\"{license_classifiers} ({short_license})\"\n else:\n license = license_classifiers or short_license or None\n\n return {\n \"project\": project,\n \"release\": release,\n \"description\": description,\n \"files\": release.files.all(),\n \"latest_version\": project.latest_version,\n \"all_versions\": project.all_versions,\n \"maintainers\": maintainers,\n \"license\": license,\n }\n\n\n@view_config(\n route_name=\"includes.edit-project-button\",\n context=Project,\n renderer=\"includes/manage-project-button.html\",\n uses_session=True,\n permission=\"manage:project\",\n has_translations=True,\n)\ndef edit_project_button(project, request):\n return {\"project\": project}\n", "path": "warehouse/packaging/views.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom natsort import natsorted\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm.exc import 
NoResultFound\n\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import Project, Release, Role\nfrom warehouse.utils import readme\n\n\n@view_config(\n route_name=\"packaging.project\",\n context=Project,\n renderer=\"packaging/detail.html\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, stale_if_error=5 * 24 * 60 * 60 # 1 day, 5 days stale\n )\n ],\n has_translations=True,\n)\ndef project_detail(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(request.current_route_path(name=project.name))\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project)\n .order_by(\n Release.yanked,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .limit(1)\n .one()\n )\n except NoResultFound:\n raise HTTPNotFound\n\n return release_detail(release, request)\n\n\n@view_config(\n route_name=\"packaging.release\",\n context=Release,\n renderer=\"packaging/detail.html\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, stale_if_error=5 * 24 * 60 * 60 # 1 day, 5 days stale\n )\n ],\n has_translations=True,\n)\ndef release_detail(release, request):\n project = release.project\n\n # Check if the requested version is equivalent but not exactly the same as\n # the release's version. Use `.get` because this view is used by\n # `project_detail` and there may not be a version.\n #\n # This also handles the case where both the version and the project name\n # need adjusted, and handles it in a single redirect.\n if release.version != request.matchdict.get(\"version\", release.version):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name, version=release.version)\n )\n\n # It's possible that the requested version was correct (or not provided),\n # but we still need to adjust the project name.\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(request.current_route_path(name=project.name))\n\n # Grab the rendered description if it exists, and if it doesn't, then we will render\n # it inline.\n # TODO: Remove the fallback to rendering inline and only support displaying the\n # already rendered content.\n if release.description.html:\n description = release.description.html\n else:\n description = readme.render(\n release.description.raw, release.description.content_type\n )\n\n # Get all of the maintainers for this project.\n maintainers = [\n r.user\n for r in (\n request.db.query(Role)\n .join(User)\n .filter(Role.project == project)\n .distinct(User.username)\n .order_by(User.username)\n .all()\n )\n ]\n\n # Get the license from both the `Classifier` and `License` metadata fields\n license_classifiers = \", \".join(\n c.split(\" :: \")[-1] for c in release.classifiers if c.startswith(\"License\")\n )\n\n # Make a best effort when the entire license text is given by using the\n # first line only.\n short_license = release.license.split(\"\\n\")[0] if release.license else None\n\n if license_classifiers and short_license:\n license = f\"{license_classifiers} ({short_license})\"\n else:\n license = license_classifiers or short_license or None\n\n return {\n \"project\": project,\n \"release\": release,\n \"description\": description,\n # We cannot easily sort naturally in SQL, sort here and pass to template\n \"files\": natsorted(release.files.all(), reverse=True, key=lambda f: f.filename),\n \"latest_version\": project.latest_version,\n 
\"all_versions\": project.all_versions,\n \"maintainers\": maintainers,\n \"license\": license,\n }\n\n\n@view_config(\n route_name=\"includes.edit-project-button\",\n context=Project,\n renderer=\"includes/manage-project-button.html\",\n uses_session=True,\n permission=\"manage:project\",\n has_translations=True,\n)\ndef edit_project_button(project, request):\n return {\"project\": project}\n", "path": "warehouse/packaging/views.py"}]} | 1,991 | 219 |
gh_patches_debug_16052 | rasdani/github-patches | git_diff | google__flax-985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Port ensembling HOWTO from the old diff-based system
Instead, use a standalone doc with tests, as in #771
Here is the old (pre-Linen) HOWTO diff, for reference:
https://github.com/google/flax/blob/master/howtos/diffs/ensembling.diff
--- END ISSUE ---
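For context, the kind of standalone, doctest-friendly snippet such a HOWTO could contain might look like the sketch below. The `MLP` module, the ensemble size of 8, and the dummy inputs are illustrative assumptions rather than code from the Flax repository, and the sketch assumes the current `flax.linen` / `jax.vmap` API.
```python
import jax
import jax.numpy as jnp
import flax.linen as nn


class MLP(nn.Module):
    features: int = 10

    @nn.compact
    def __call__(self, x):
        x = nn.relu(nn.Dense(64)(x))
        return nn.Dense(self.features)(x)


def init_params(rng, x):
    return MLP().init(rng, x)


# One parameter set per RNG key; jax.vmap turns the single-model code into an ensemble.
keys = jax.random.split(jax.random.PRNGKey(0), 8)
x = jnp.ones((4, 16))
ensemble_params = jax.vmap(init_params, in_axes=(0, None))(keys, x)
logits = jax.vmap(lambda p: MLP().apply(p, x))(ensemble_params)
print(logits.shape)  # (8, 4, 10): ensemble size x batch x output features
```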
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/_ext/codediff.py`
Content:
```
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import dataclasses
15 from typing import Optional, Sequence
16 import itertools
17
18 from docutils import nodes
19 from docutils.parsers.rst import directives
20 from docutils.statemachine import ViewList
21
22 import sphinx
23 from sphinx.util.docutils import SphinxDirective
24 """Sphinx directive for creating code diff tables.
25
26 Use directive as follows:
27
28 .. codediff::
29 :title-left: <LEFT_CODE_BLOCK_TITLE>
30 :title-right: <RIGHT_CODE_BLOCK_TITLE>
31 :highlight-left: <LINES_TO_HIGHLIGHT_LEFT>
32 :highlight-right: <LINES_TO_HIGHLIGHT_RIGHT>
33
34 <CODE_BLOCK_LEFT>
35 ---
36 <CODE_BLOCK_RIGHT>
37 """
38
39 class CodeDiffParser:
40 def parse(self, lines, title_left='Base', title_right='Diff', code_sep='---'):
41 if code_sep not in lines:
42 raise ValueError('Code separator not found! Code snippets should be '
43 f'separated by {code_sep}.')
44 idx = lines.index(code_sep)
45 code_left = self._code_block(lines[0: idx])
46 code_right = self._code_block(lines[idx+1:])
47
48 self.max_left = max(len(x) for x in code_left + [title_left])
49 self.max_right = max(len(x) for x in code_right + [title_right])
50
51 output = [
52 self._hline(),
53 self._table_row(title_left, title_right),
54 self._hline(),
55 ]
56
57 for l, r in itertools.zip_longest(code_left, code_right, fillvalue=''):
58 output += [self._table_row(l, r)]
59
60 return output + [self._hline()]
61
62 def _code_block(self, lines):
63 # Remove right trailing whitespace so we can detect the comments.
64 lines = [x.rstrip() for x in lines]
65 highlight = lambda x : x.endswith('#!')
66 code = map(lambda x : x[:-2].rstrip() if highlight(x) else x, lines)
67 highlights = [i+1 for i in range(len(lines)) if highlight(lines[i])]
68 highlights = ','.join(str(i) for i in highlights)
69
70 directive = ['.. code-block:: python']
71 if highlights:
72 directive += [f' :emphasize-lines: {highlights}']
73
74 # Indent code and add empty line so the code is picked up by the directive.
75 return directive + [''] + list(map(lambda x: ' ' + x, code))
76
77 def _hline(self):
78 return '+' + '-'*(self.max_left+2) + '+' + '-'*(self.max_right+2) + '+'
79
80 def _rfill(self, text, max_len):
81 return text + ' ' * (max_len-len(text))
82
83 def _table_row(self, left, right):
84 text_left = self._rfill(left, self.max_left)
85 text_right = self._rfill(right, self.max_right)
86 return '| ' + text_left + ' | ' + text_right + ' |'
87
88
89 class CodeDiffDirective(SphinxDirective):
90 has_content = True
91 option_spec = {
92 'title_left': directives.unchanged,
93 'title_right': directives.unchanged,
94 'code_sep': directives.unchanged,
95 }
96
97 def run(self):
98 new_content = CodeDiffParser().parse(list(self.content), **self.options)
99
100 node = nodes.paragraph()
101 self.content = ViewList(new_content, self.content.parent)
102 self.state.nested_parse(self.content, self.content_offset, node)
103 return [node]
104
105 def setup(app):
106 app.add_directive('codediff', CodeDiffDirective)
107
108 return {
109 'version': sphinx.__display_version__,
110 'parallel_read_safe': True,
111 'parallel_write_safe': True,
112 }
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/_ext/codediff.py b/docs/_ext/codediff.py
--- a/docs/_ext/codediff.py
+++ b/docs/_ext/codediff.py
@@ -26,14 +26,14 @@
Use directive as follows:
.. codediff::
- :title-left: <LEFT_CODE_BLOCK_TITLE>
- :title-right: <RIGHT_CODE_BLOCK_TITLE>
- :highlight-left: <LINES_TO_HIGHLIGHT_LEFT>
- :highlight-right: <LINES_TO_HIGHLIGHT_RIGHT>
+ :title_left: <LEFT_CODE_BLOCK_TITLE>
+ :title_right: <RIGHT_CODE_BLOCK_TITLE>
<CODE_BLOCK_LEFT>
---
<CODE_BLOCK_RIGHT>
+
+In order to highlight a line of code, prepend it with "#!".
"""
class CodeDiffParser:
@@ -94,7 +94,7 @@
'code_sep': directives.unchanged,
}
- def run(self):
+ def run(self):
new_content = CodeDiffParser().parse(list(self.content), **self.options)
node = nodes.paragraph()
| {"golden_diff": "diff --git a/docs/_ext/codediff.py b/docs/_ext/codediff.py\n--- a/docs/_ext/codediff.py\n+++ b/docs/_ext/codediff.py\n@@ -26,14 +26,14 @@\n Use directive as follows:\n \n .. codediff::\n- :title-left: <LEFT_CODE_BLOCK_TITLE>\n- :title-right: <RIGHT_CODE_BLOCK_TITLE>\n- :highlight-left: <LINES_TO_HIGHLIGHT_LEFT>\n- :highlight-right: <LINES_TO_HIGHLIGHT_RIGHT>\n+ :title_left: <LEFT_CODE_BLOCK_TITLE>\n+ :title_right: <RIGHT_CODE_BLOCK_TITLE>\n \n <CODE_BLOCK_LEFT>\n ---\n <CODE_BLOCK_RIGHT>\n+\n+In order to highlight a line of code, prepend it with \"#!\".\n \"\"\"\n \n class CodeDiffParser:\n@@ -94,7 +94,7 @@\n 'code_sep': directives.unchanged,\n }\n \n- def run(self): \n+ def run(self):\n new_content = CodeDiffParser().parse(list(self.content), **self.options)\n \n node = nodes.paragraph()\n", "issue": "Port ensembling HOWTO from old diff based system\nAnd instead, use a standalone doc with tests like in #771\r\n\r\nHere is the old (pre-Linen) HOWTO diff, for reference:\r\nhttps://github.com/google/flax/blob/master/howtos/diffs/ensembling.diff\n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport dataclasses\nfrom typing import Optional, Sequence\nimport itertools\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives\nfrom docutils.statemachine import ViewList\n\nimport sphinx\nfrom sphinx.util.docutils import SphinxDirective\n\"\"\"Sphinx directive for creating code diff tables.\n\nUse directive as follows:\n\n.. codediff::\n :title-left: <LEFT_CODE_BLOCK_TITLE>\n :title-right: <RIGHT_CODE_BLOCK_TITLE>\n :highlight-left: <LINES_TO_HIGHLIGHT_LEFT>\n :highlight-right: <LINES_TO_HIGHLIGHT_RIGHT>\n \n <CODE_BLOCK_LEFT>\n ---\n <CODE_BLOCK_RIGHT>\n\"\"\"\n\nclass CodeDiffParser:\n def parse(self, lines, title_left='Base', title_right='Diff', code_sep='---'):\n if code_sep not in lines:\n raise ValueError('Code separator not found! Code snippets should be '\n f'separated by {code_sep}.')\n idx = lines.index(code_sep)\n code_left = self._code_block(lines[0: idx])\n code_right = self._code_block(lines[idx+1:])\n \n self.max_left = max(len(x) for x in code_left + [title_left])\n self.max_right = max(len(x) for x in code_right + [title_right])\n\n output = [\n self._hline(),\n self._table_row(title_left, title_right),\n self._hline(),\n ]\n\n for l, r in itertools.zip_longest(code_left, code_right, fillvalue=''):\n output += [self._table_row(l, r)]\n\n return output + [self._hline()]\n\n def _code_block(self, lines):\n # Remove right trailing whitespace so we can detect the comments.\n lines = [x.rstrip() for x in lines]\n highlight = lambda x : x.endswith('#!')\n code = map(lambda x : x[:-2].rstrip() if highlight(x) else x, lines)\n highlights = [i+1 for i in range(len(lines)) if highlight(lines[i])]\n highlights = ','.join(str(i) for i in highlights)\n\n directive = ['.. 
code-block:: python']\n if highlights:\n directive += [f' :emphasize-lines: {highlights}']\n\n # Indent code and add empty line so the code is picked up by the directive.\n return directive + [''] + list(map(lambda x: ' ' + x, code))\n\n def _hline(self):\n return '+' + '-'*(self.max_left+2) + '+' + '-'*(self.max_right+2) + '+'\n\n def _rfill(self, text, max_len):\n return text + ' ' * (max_len-len(text))\n\n def _table_row(self, left, right):\n text_left = self._rfill(left, self.max_left)\n text_right = self._rfill(right, self.max_right)\n return '| ' + text_left + ' | ' + text_right + ' |'\n\n\nclass CodeDiffDirective(SphinxDirective):\n has_content = True\n option_spec = {\n 'title_left': directives.unchanged,\n 'title_right': directives.unchanged,\n 'code_sep': directives.unchanged,\n }\n\n def run(self): \n new_content = CodeDiffParser().parse(list(self.content), **self.options)\n\n node = nodes.paragraph()\n self.content = ViewList(new_content, self.content.parent)\n self.state.nested_parse(self.content, self.content_offset, node)\n return [node]\n\ndef setup(app):\n app.add_directive('codediff', CodeDiffDirective)\n\n return {\n 'version': sphinx.__display_version__,\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "docs/_ext/codediff.py"}], "after_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport dataclasses\nfrom typing import Optional, Sequence\nimport itertools\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives\nfrom docutils.statemachine import ViewList\n\nimport sphinx\nfrom sphinx.util.docutils import SphinxDirective\n\"\"\"Sphinx directive for creating code diff tables.\n\nUse directive as follows:\n\n.. codediff::\n :title_left: <LEFT_CODE_BLOCK_TITLE>\n :title_right: <RIGHT_CODE_BLOCK_TITLE>\n \n <CODE_BLOCK_LEFT>\n ---\n <CODE_BLOCK_RIGHT>\n\nIn order to highlight a line of code, prepend it with \"#!\".\n\"\"\"\n\nclass CodeDiffParser:\n def parse(self, lines, title_left='Base', title_right='Diff', code_sep='---'):\n if code_sep not in lines:\n raise ValueError('Code separator not found! 
Code snippets should be '\n f'separated by {code_sep}.')\n idx = lines.index(code_sep)\n code_left = self._code_block(lines[0: idx])\n code_right = self._code_block(lines[idx+1:])\n \n self.max_left = max(len(x) for x in code_left + [title_left])\n self.max_right = max(len(x) for x in code_right + [title_right])\n\n output = [\n self._hline(),\n self._table_row(title_left, title_right),\n self._hline(),\n ]\n\n for l, r in itertools.zip_longest(code_left, code_right, fillvalue=''):\n output += [self._table_row(l, r)]\n\n return output + [self._hline()]\n\n def _code_block(self, lines):\n # Remove right trailing whitespace so we can detect the comments.\n lines = [x.rstrip() for x in lines]\n highlight = lambda x : x.endswith('#!')\n code = map(lambda x : x[:-2].rstrip() if highlight(x) else x, lines)\n highlights = [i+1 for i in range(len(lines)) if highlight(lines[i])]\n highlights = ','.join(str(i) for i in highlights)\n\n directive = ['.. code-block:: python']\n if highlights:\n directive += [f' :emphasize-lines: {highlights}']\n\n # Indent code and add empty line so the code is picked up by the directive.\n return directive + [''] + list(map(lambda x: ' ' + x, code))\n\n def _hline(self):\n return '+' + '-'*(self.max_left+2) + '+' + '-'*(self.max_right+2) + '+'\n\n def _rfill(self, text, max_len):\n return text + ' ' * (max_len-len(text))\n\n def _table_row(self, left, right):\n text_left = self._rfill(left, self.max_left)\n text_right = self._rfill(right, self.max_right)\n return '| ' + text_left + ' | ' + text_right + ' |'\n\n\nclass CodeDiffDirective(SphinxDirective):\n has_content = True\n option_spec = {\n 'title_left': directives.unchanged,\n 'title_right': directives.unchanged,\n 'code_sep': directives.unchanged,\n }\n\n def run(self):\n new_content = CodeDiffParser().parse(list(self.content), **self.options)\n\n node = nodes.paragraph()\n self.content = ViewList(new_content, self.content.parent)\n self.state.nested_parse(self.content, self.content_offset, node)\n return [node]\n\ndef setup(app):\n app.add_directive('codediff', CodeDiffDirective)\n\n return {\n 'version': sphinx.__display_version__,\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "docs/_ext/codediff.py"}]} | 1,495 | 242 |
gh_patches_debug_34734 | rasdani/github-patches | git_diff | astronomer__astro-sdk-455 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change `export_file` to return `File` object
**Context**
In order to allow users to perform subsequent actions on an exported file (while maintaining a functional structure), we should allow the `export_file` function to return a file object.
* Astro-SDK version: 0.9.1
* Request by: @jlaneve
* Analysed by @dimberman
**Problem**
At the moment a user who wants to use the `output_file` object would need to explicitly set dependencies like this:
```
output_file = File(path="/tmp/saved_df.csv")
with sample_dag:
table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
export = aql.export_file(
input_data=table,
output_file=output_file,
if_exists="replace",
)
res_df = aql.load_file(input_file=output_file)
export >> res_df
```
**Desired behaviour**
```
with sample_dag:
table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
exported_file = aql.export_file(
input_data=table,
output_file=File(path="/tmp/saved_df.csv"),
if_exists="replace",
)
res_df = aql.load_file(input_file=exported_file)
```
**Acceptance criteria**
* Change `export_file` so it returns the `File` instance, as opposed to `None`
Since there is no documentation about this task, we don't need to update the documentation for it. Creating documentation for this feature should be part of another issue.
--- END ISSUE ---
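The acceptance criterion boils down to handing the written `File` back to the caller, so that the task's return value is the file object rather than `None`. The sketch below only demonstrates that return-and-chain pattern with hypothetical stand-ins (a local `File` dataclass and an `export_to_file` function), not the real astro-sdk or Airflow classes.
```python
import os
from dataclasses import dataclass

import pandas as pd


@dataclass
class File:
    path: str

    def exists(self) -> bool:
        return os.path.exists(self.path)

    def create_from_dataframe(self, df: pd.DataFrame) -> None:
        df.to_csv(self.path, index=False)


def export_to_file(df: pd.DataFrame, output_file: File, if_exists: str = "exception") -> File:
    if if_exists == "replace" or not output_file.exists():
        output_file.create_from_dataframe(df)
        return output_file  # returning the File lets a downstream step consume it directly
    raise FileExistsError(f"{output_file.path} file already exists.")


exported = export_to_file(pd.DataFrame({"a": [1, 2]}), File("/tmp/saved_df.csv"), if_exists="replace")
print(exported.path)  # a downstream load step can now take `exported` as its input file
```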
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/astro/sql/operators/export_file.py`
Content:
```
1 from typing import Optional, Union
2
3 import pandas as pd
4 from airflow.models import BaseOperator
5 from airflow.models.xcom_arg import XComArg
6
7 from astro.constants import ExportExistsStrategy
8 from astro.databases import create_database
9 from astro.files import File
10 from astro.sql.table import Table
11 from astro.utils.task_id_helper import get_task_id
12
13
14 class ExportFile(BaseOperator):
15 """Write SQL table to csv/parquet on local/S3/GCS.
16
17 :param input_data: Table to convert to file
18 :param output_file: File object containing the path to the file and connection id.
19 :param if_exists: Overwrite file if exists. Default False.
20 """
21
22 template_fields = ("input_data", "output_file")
23
24 def __init__(
25 self,
26 input_data: Union[Table, pd.DataFrame],
27 output_file: File,
28 if_exists: ExportExistsStrategy = "exception",
29 **kwargs,
30 ) -> None:
31 super().__init__(**kwargs)
32 self.output_file = output_file
33 self.input_data = input_data
34 self.if_exists = if_exists
35 self.kwargs = kwargs
36
37 def execute(self, context: dict) -> None:
38 """Write SQL table to csv/parquet on local/S3/GCS.
39
40 Infers SQL database type based on connection.
41 """
42 # Infer db type from `input_conn_id`.
43 if isinstance(self.input_data, Table):
44 database = create_database(self.input_data.conn_id)
45 self.input_data = database.populate_table_metadata(self.input_data)
46 df = database.export_table_to_pandas_dataframe(self.input_data)
47 elif isinstance(self.input_data, pd.DataFrame):
48 df = self.input_data
49 else:
50 raise ValueError(
51 f"Expected input_table to be Table or dataframe. Got {type(self.input_data)}"
52 )
53 # Write file if overwrite == True or if file doesn't exist.
54 if self.if_exists == "replace" or not self.output_file.exists():
55 self.output_file.create_from_dataframe(df)
56 else:
57 raise FileExistsError(f"{self.output_file.path} file already exists.")
58
59
60 def export_file(
61 input_data: Union[Table, pd.DataFrame],
62 output_file: File,
63 if_exists: ExportExistsStrategy = "exception",
64 task_id: Optional[str] = None,
65 **kwargs,
66 ) -> XComArg:
67 """Convert SaveFile into a function. Returns XComArg.
68
69 Returns an XComArg object.
70
71 :param output_file: Path and conn_id
72 :param input_data: Input table / dataframe
73 :param if_exists: Overwrite file if exists. Default "exception"
74 :param task_id: task id, optional
75 """
76
77 task_id = (
78 task_id if task_id is not None else get_task_id("export_file", output_file.path)
79 )
80
81 return ExportFile(
82 task_id=task_id,
83 output_file=output_file,
84 input_data=input_data,
85 if_exists=if_exists,
86 ).output
87
```
Path: `src/astro/__init__.py`
Content:
```
1 """A decorator that allows users to run SQL queries natively in Airflow."""
2
3 __version__ = "0.9.1"
4
5 # The following line is an import work-around to avoid raising a circular dependency issue related to `create_database`
6 # Without this, if we run the following imports, in this specific order:
7 # from astro.databases import create_database
8 # from astro.sql.table import Metadata, Table, create_unique_table_name
9 # We face ImportError, as it happened in:
10 # https://github.com/astronomer/astro-sdk/pull/396/commits/fbe73bdbe46d65777258a5f79f461ef69f08a673
11 # https://github.com/astronomer/astro-sdk/actions/runs/2378526135
12 # Although astro.database does not depend on astro.sql, it depends on astro.sql.table - and, unless astro.sql was
13 # imported beforehand, it will also load astro.sql. In astro.sql we import lots of operators which depend on
14 # astro.database, and this is what leads to the circular dependency.
15 import astro.sql # noqa: F401
16
17
18 # This is needed to allow Airflow to pick up specific metadata fields it needs
19 # for certain features. We recognize it's a bit unclean to define these in
20 # multiple places, but at this point it's the only workaround if you'd like
21 # your custom conn type to show up in the Airflow UI.
22 def get_provider_info() -> dict:
23 return {
24 # Required.
25 "package-name": "astro-sdk-python",
26 "name": "Astro SQL Provider",
27 "description": __doc__,
28 "versions": [__version__],
29 # Optional.
30 "hook-class-names": [],
31 "extra-links": [],
32 }
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/astro/__init__.py b/src/astro/__init__.py
--- a/src/astro/__init__.py
+++ b/src/astro/__init__.py
@@ -1,6 +1,6 @@
"""A decorator that allows users to run SQL queries natively in Airflow."""
-__version__ = "0.9.1"
+__version__ = "0.9.2"
# The following line is an import work-around to avoid raising a circular dependency issue related to `create_database`
# Without this, if we run the following imports, in this specific order:
diff --git a/src/astro/sql/operators/export_file.py b/src/astro/sql/operators/export_file.py
--- a/src/astro/sql/operators/export_file.py
+++ b/src/astro/sql/operators/export_file.py
@@ -34,7 +34,7 @@
self.if_exists = if_exists
self.kwargs = kwargs
- def execute(self, context: dict) -> None:
+ def execute(self, context: dict) -> File:
"""Write SQL table to csv/parquet on local/S3/GCS.
Infers SQL database type based on connection.
@@ -53,6 +53,7 @@
# Write file if overwrite == True or if file doesn't exist.
if self.if_exists == "replace" or not self.output_file.exists():
self.output_file.create_from_dataframe(df)
+ return self.output_file
else:
raise FileExistsError(f"{self.output_file.path} file already exists.")
@@ -66,7 +67,20 @@
) -> XComArg:
"""Convert SaveFile into a function. Returns XComArg.
- Returns an XComArg object.
+ Returns an XComArg object of type File which matches the output_file parameter.
+
+ This will allow users to perform further actions with the exported file.
+
+ e.g.
+
+ with sample_dag:
+ table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
+ exported_file = aql.export_file(
+ input_data=table,
+ output_file=File(path="/tmp/saved_df.csv"),
+ if_exists="replace",
+ )
+ res_df = aql.load_file(input_file=exported_file)
:param output_file: Path and conn_id
:param input_data: Input table / dataframe
| {"golden_diff": "diff --git a/src/astro/__init__.py b/src/astro/__init__.py\n--- a/src/astro/__init__.py\n+++ b/src/astro/__init__.py\n@@ -1,6 +1,6 @@\n \"\"\"A decorator that allows users to run SQL queries natively in Airflow.\"\"\"\n \n-__version__ = \"0.9.1\"\n+__version__ = \"0.9.2\"\n \n # The following line is an import work-around to avoid raising a circular dependency issue related to `create_database`\n # Without this, if we run the following imports, in this specific order:\ndiff --git a/src/astro/sql/operators/export_file.py b/src/astro/sql/operators/export_file.py\n--- a/src/astro/sql/operators/export_file.py\n+++ b/src/astro/sql/operators/export_file.py\n@@ -34,7 +34,7 @@\n self.if_exists = if_exists\n self.kwargs = kwargs\n \n- def execute(self, context: dict) -> None:\n+ def execute(self, context: dict) -> File:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n \n Infers SQL database type based on connection.\n@@ -53,6 +53,7 @@\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n+ return self.output_file\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n \n@@ -66,7 +67,20 @@\n ) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n \n- Returns an XComArg object.\n+ Returns an XComArg object of type File which matches the output_file parameter.\n+\n+ This will allow users to perform further actions with the exported file.\n+\n+ e.g.\n+\n+ with sample_dag:\n+ table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\n+ exported_file = aql.export_file(\n+ input_data=table,\n+ output_file=File(path=\"/tmp/saved_df.csv\"),\n+ if_exists=\"replace\",\n+ )\n+ res_df = aql.load_file(input_file=exported_file)\n \n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n", "issue": "Change `export_file` to return `File` object\n**Context**\r\n\r\nIn order to allow users to perform subsequent actions on an exported file (while maintaining a functional structure), we should allow the `export_file` function to return a file object.\r\n\r\n* Astro-SDK version: 0.9..1\r\n* Request by: @jlaneve\r\n* Analysed by @dimberman \r\n\r\n**Problem**\r\n\r\nAt the moment a user who wants to use the `output_file` object would need to explicitly set dependencies like this:\r\n\r\n```\r\n output_file = File(path=\"/tmp/saved_df.csv\")\r\n with sample_dag:\r\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\r\n export = aql.export_file(\r\n input_data=table,\r\n output_file=output_file,\r\n if_exists=\"replace\",\r\n )\r\n res_df = aql.load_file(input_file=output_file)\r\n export >> res_df\r\n```\r\n\r\n**Desired behaviour**\r\n\r\n```\r\n with sample_dag:\r\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\r\n exported_file = aql.export_file(\r\n input_data=table,\r\n output_file=File(path=\"/tmp/saved_df.csv\"),\r\n if_exists=\"replace\",\r\n )\r\n res_df = aql.load_file(input_file=exported_file)\r\n```\r\n\r\n**Acceptance criteria**\r\n* Change `export_file` so it returns the `File` instance, as opposed to `None`\r\n\r\nSince there is no documentation about this task, we don't need to update the documentation for it. 
To create documentation for this feature should be part of another issue.\n", "before_files": [{"content": "from typing import Optional, Union\n\nimport pandas as pd\nfrom airflow.models import BaseOperator\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.constants import ExportExistsStrategy\nfrom astro.databases import create_database\nfrom astro.files import File\nfrom astro.sql.table import Table\nfrom astro.utils.task_id_helper import get_task_id\n\n\nclass ExportFile(BaseOperator):\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n :param input_data: Table to convert to file\n :param output_file: File object containing the path to the file and connection id.\n :param if_exists: Overwrite file if exists. Default False.\n \"\"\"\n\n template_fields = (\"input_data\", \"output_file\")\n\n def __init__(\n self,\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.output_file = output_file\n self.input_data = input_data\n self.if_exists = if_exists\n self.kwargs = kwargs\n\n def execute(self, context: dict) -> None:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n Infers SQL database type based on connection.\n \"\"\"\n # Infer db type from `input_conn_id`.\n if isinstance(self.input_data, Table):\n database = create_database(self.input_data.conn_id)\n self.input_data = database.populate_table_metadata(self.input_data)\n df = database.export_table_to_pandas_dataframe(self.input_data)\n elif isinstance(self.input_data, pd.DataFrame):\n df = self.input_data\n else:\n raise ValueError(\n f\"Expected input_table to be Table or dataframe. Got {type(self.input_data)}\"\n )\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n\n\ndef export_file(\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n task_id: Optional[str] = None,\n **kwargs,\n) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n\n Returns an XComArg object.\n\n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n :param if_exists: Overwrite file if exists. 
Default \"exception\"\n :param task_id: task id, optional\n \"\"\"\n\n task_id = (\n task_id if task_id is not None else get_task_id(\"export_file\", output_file.path)\n )\n\n return ExportFile(\n task_id=task_id,\n output_file=output_file,\n input_data=input_data,\n if_exists=if_exists,\n ).output\n", "path": "src/astro/sql/operators/export_file.py"}, {"content": "\"\"\"A decorator that allows users to run SQL queries natively in Airflow.\"\"\"\n\n__version__ = \"0.9.1\"\n\n# The following line is an import work-around to avoid raising a circular dependency issue related to `create_database`\n# Without this, if we run the following imports, in this specific order:\n# from astro.databases import create_database\n# from astro.sql.table import Metadata, Table, create_unique_table_name\n# We face ImportError, as it happened in:\n# https://github.com/astronomer/astro-sdk/pull/396/commits/fbe73bdbe46d65777258a5f79f461ef69f08a673\n# https://github.com/astronomer/astro-sdk/actions/runs/2378526135\n# Although astro.database does not depend on astro.sql, it depends on astro.sql.table - and, unless astro.sql was\n# imported beforehand, it will also load astro.sql. In astro.sql we import lots of operators which depend on\n# astro.database, and this is what leads to the circular dependency.\nimport astro.sql # noqa: F401\n\n\n# This is needed to allow Airflow to pick up specific metadata fields it needs\n# for certain features. We recognize it's a bit unclean to define these in\n# multiple places, but at this point it's the only workaround if you'd like\n# your custom conn type to show up in the Airflow UI.\ndef get_provider_info() -> dict:\n return {\n # Required.\n \"package-name\": \"astro-sdk-python\",\n \"name\": \"Astro SQL Provider\",\n \"description\": __doc__,\n \"versions\": [__version__],\n # Optional.\n \"hook-class-names\": [],\n \"extra-links\": [],\n }\n", "path": "src/astro/__init__.py"}], "after_files": [{"content": "from typing import Optional, Union\n\nimport pandas as pd\nfrom airflow.models import BaseOperator\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.constants import ExportExistsStrategy\nfrom astro.databases import create_database\nfrom astro.files import File\nfrom astro.sql.table import Table\nfrom astro.utils.task_id_helper import get_task_id\n\n\nclass ExportFile(BaseOperator):\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n :param input_data: Table to convert to file\n :param output_file: File object containing the path to the file and connection id.\n :param if_exists: Overwrite file if exists. 
Default False.\n \"\"\"\n\n template_fields = (\"input_data\", \"output_file\")\n\n def __init__(\n self,\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.output_file = output_file\n self.input_data = input_data\n self.if_exists = if_exists\n self.kwargs = kwargs\n\n def execute(self, context: dict) -> File:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n Infers SQL database type based on connection.\n \"\"\"\n # Infer db type from `input_conn_id`.\n if isinstance(self.input_data, Table):\n database = create_database(self.input_data.conn_id)\n self.input_data = database.populate_table_metadata(self.input_data)\n df = database.export_table_to_pandas_dataframe(self.input_data)\n elif isinstance(self.input_data, pd.DataFrame):\n df = self.input_data\n else:\n raise ValueError(\n f\"Expected input_table to be Table or dataframe. Got {type(self.input_data)}\"\n )\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n return self.output_file\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n\n\ndef export_file(\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n task_id: Optional[str] = None,\n **kwargs,\n) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n\n Returns an XComArg object of type File which matches the output_file parameter.\n\n This will allow users to perform further actions with the exported file.\n\n e.g.\n\n with sample_dag:\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\n exported_file = aql.export_file(\n input_data=table,\n output_file=File(path=\"/tmp/saved_df.csv\"),\n if_exists=\"replace\",\n )\n res_df = aql.load_file(input_file=exported_file)\n\n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n :param if_exists: Overwrite file if exists. Default \"exception\"\n :param task_id: task id, optional\n \"\"\"\n\n task_id = (\n task_id if task_id is not None else get_task_id(\"export_file\", output_file.path)\n )\n\n return ExportFile(\n task_id=task_id,\n output_file=output_file,\n input_data=input_data,\n if_exists=if_exists,\n ).output\n", "path": "src/astro/sql/operators/export_file.py"}, {"content": "\"\"\"A decorator that allows users to run SQL queries natively in Airflow.\"\"\"\n\n__version__ = \"0.9.2\"\n\n# The following line is an import work-around to avoid raising a circular dependency issue related to `create_database`\n# Without this, if we run the following imports, in this specific order:\n# from astro.databases import create_database\n# from astro.sql.table import Metadata, Table, create_unique_table_name\n# We face ImportError, as it happened in:\n# https://github.com/astronomer/astro-sdk/pull/396/commits/fbe73bdbe46d65777258a5f79f461ef69f08a673\n# https://github.com/astronomer/astro-sdk/actions/runs/2378526135\n# Although astro.database does not depend on astro.sql, it depends on astro.sql.table - and, unless astro.sql was\n# imported beforehand, it will also load astro.sql. 
In astro.sql we import lots of operators which depend on\n# astro.database, and this is what leads to the circular dependency.\nimport astro.sql # noqa: F401\n\n\n# This is needed to allow Airflow to pick up specific metadata fields it needs\n# for certain features. We recognize it's a bit unclean to define these in\n# multiple places, but at this point it's the only workaround if you'd like\n# your custom conn type to show up in the Airflow UI.\ndef get_provider_info() -> dict:\n return {\n # Required.\n \"package-name\": \"astro-sdk-python\",\n \"name\": \"Astro SQL Provider\",\n \"description\": __doc__,\n \"versions\": [__version__],\n # Optional.\n \"hook-class-names\": [],\n \"extra-links\": [],\n }\n", "path": "src/astro/__init__.py"}]} | 1,891 | 525 |
gh_patches_debug_3737 | rasdani/github-patches | git_diff | intel__dffml-529 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docs: Enable hiding of Python prompts
This will be very helpful for copy-pasting examples.
References:
- https://github.com/readthedocs/sphinx_rtd_theme/issues/167
--- END ISSUE ---
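The referenced sphinx_rtd_theme issue points at the usual fix: ship a small `copybutton.js` with the docs' static files and register it from `conf.py`. A minimal sketch of that registration is shown below; the `setup` hook and the file name are assumptions based on that reference, not necessarily what the project ends up using.
```python
# Sketch of a docs/conf.py addition; assumes docs/_static/copybutton.js exists.
def setup(app):
    # copybutton.js toggles the ">>> " prompts and output lines in code blocks,
    # so readers can copy runnable Python without the prompts.
    app.add_javascript("copybutton.js")
```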
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # http://www.sphinx-doc.org/en/master/config
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 import os
14 import sys
15 import pathlib
16
17 sys.path.insert(0, os.path.abspath("."))
18 from dffml.version import VERSION
19
20 # -- Project information -----------------------------------------------------
21
22 project = "DFFML"
23 copyright = "2019, Intel"
24 author = "John Andersen"
25
26 # The short X.Y version
27 version = VERSION
28
29 # The full version, including alpha/beta/rc tags
30 release = version
31
32
33 # -- General configuration ---------------------------------------------------
34
35 # Add any Sphinx extension module names here, as strings. They can be
36 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
37 # ones.
38 extensions = [
39 "sphinx.ext.intersphinx",
40 "sphinx.ext.autodoc",
41 "sphinx.ext.viewcode",
42 "sphinx.ext.napoleon",
43 "sphinx.ext.doctest",
44 "recommonmark",
45 ]
46
47 intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
48
49 # Add any paths that contain templates here, relative to this directory.
50 templates_path = ["_templates"]
51
52 # List of patterns, relative to source directory, that match files and
53 # directories to ignore when looking for source files.
54 # This pattern also affects html_static_path and html_extra_path.
55 exclude_patterns = []
56
57 # Enable markdown
58 source_suffix = {
59 ".rst": "restructuredtext",
60 ".txt": "markdown",
61 ".md": "markdown",
62 }
63
64
65 # -- Options for HTML output -------------------------------------------------
66
67 # The theme to use for HTML and HTML Help pages. See the documentation for
68 # a list of builtin themes.
69 #
70 html_theme = "sphinx_rtd_theme"
71
72 html_context = {
73 "github_user": "intel",
74 "github_repo": "dffml",
75 "github_version": "master",
76 "conf_py_path": "/docs/",
77 "display_github": True,
78 }
79
80 html_theme_options = {
81 "description": "The fastest path to machine learning integration",
82 "github_url": "https://github.com/intel/dffml/",
83 }
84
85 # Add any paths that contain custom static files (such as style sheets) here,
86 # relative to this directory. They are copied after the builtin static files,
87 # so a file named "default.css" will overwrite the builtin "default.css".
88 html_static_path = ["_static"]
89
90 # -- Extension configuration -------------------------------------------------
91
92 napoleon_numpy_docstring = True
93
94 doctest_global_setup = (
95 pathlib.Path(__file__).parent / "doctest_header.py"
96 ).read_text()
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -87,6 +87,11 @@
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
+
+def setup(app):
+ app.add_javascript("copybutton.js")
+
+
# -- Extension configuration -------------------------------------------------
napoleon_numpy_docstring = True
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -87,6 +87,11 @@\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n html_static_path = [\"_static\"]\n \n+\n+def setup(app):\n+ app.add_javascript(\"copybutton.js\")\n+\n+\n # -- Extension configuration -------------------------------------------------\n \n napoleon_numpy_docstring = True\n", "issue": "docs: Enable hiding of Python prompts\nThis will be very helpful for copy pasting examples.\r\n\r\nReferences:\r\n- https://github.com/readthedocs/sphinx_rtd_theme/issues/167\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport pathlib\n\nsys.path.insert(0, os.path.abspath(\".\"))\nfrom dffml.version import VERSION\n\n# -- Project information -----------------------------------------------------\n\nproject = \"DFFML\"\ncopyright = \"2019, Intel\"\nauthor = \"John Andersen\"\n\n# The short X.Y version\nversion = VERSION\n\n# The full version, including alpha/beta/rc tags\nrelease = version\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.doctest\",\n \"recommonmark\",\n]\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# Enable markdown\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".txt\": \"markdown\",\n \".md\": \"markdown\",\n}\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\nhtml_context = {\n \"github_user\": \"intel\",\n \"github_repo\": \"dffml\",\n \"github_version\": \"master\",\n \"conf_py_path\": \"/docs/\",\n \"display_github\": True,\n}\n\nhtml_theme_options = {\n \"description\": \"The fastest path to machine learning integration\",\n \"github_url\": \"https://github.com/intel/dffml/\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# -- Extension configuration -------------------------------------------------\n\nnapoleon_numpy_docstring = True\n\ndoctest_global_setup = (\n pathlib.Path(__file__).parent / \"doctest_header.py\"\n).read_text()\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport pathlib\n\nsys.path.insert(0, os.path.abspath(\".\"))\nfrom dffml.version import VERSION\n\n# -- Project information -----------------------------------------------------\n\nproject = \"DFFML\"\ncopyright = \"2019, Intel\"\nauthor = \"John Andersen\"\n\n# The short X.Y version\nversion = VERSION\n\n# The full version, including alpha/beta/rc tags\nrelease = version\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.doctest\",\n \"recommonmark\",\n]\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# Enable markdown\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".txt\": \"markdown\",\n \".md\": \"markdown\",\n}\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\nhtml_context = {\n \"github_user\": \"intel\",\n \"github_repo\": \"dffml\",\n \"github_version\": \"master\",\n \"conf_py_path\": \"/docs/\",\n \"display_github\": True,\n}\n\nhtml_theme_options = {\n \"description\": \"The fastest path to machine learning integration\",\n \"github_url\": \"https://github.com/intel/dffml/\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\ndef setup(app):\n app.add_javascript(\"copybutton.js\")\n\n\n# -- Extension configuration -------------------------------------------------\n\nnapoleon_numpy_docstring = True\n\ndoctest_global_setup = (\n pathlib.Path(__file__).parent / \"doctest_header.py\"\n).read_text()\n", "path": "docs/conf.py"}]} | 1,122 | 97 |
gh_patches_debug_13871 | rasdani/github-patches | git_diff | scrapy__scrapy-4207 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scrapy does not use a non-zero exit code when a pipeline's open_spider throws an exception
<!--
Thanks for taking an interest in Scrapy!
If you have a question that starts with "How to...", please see the Scrapy Community page: https://scrapy.org/community/.
The GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself.
Keep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md
The following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs
-->
### Description
In our case, we execute command `scrapy crawl` in airflow task and the exit code would be used to judge this task success or failure. I agree that `scrapy crawl` ignores spider exceptions because it's unpredictable in the crawling process.
Back to our case, we export data to file or database in the pipeline and we create the directory or database connection in `open_spider(self, spider)`. I think if there is an exception happens during this function, it's reasonable to propagate a non-zero exit code. it because we normally do some initialization in this function.
### Steps to Reproduce
- scrapy startproject test_spider
- cd test_spider
- scrapy genspider example example.com
- modify spiders/example.py to
```
# -*- coding: utf-8 -*-
import scrapy
class ExampleSpider(scrapy.Spider):
name = 'example'
allowed_domains = ['example.com']
start_urls = ['http://example.com/']
custom_settings = {
'ITEM_PIPELINES': {
'test_spider.pipelines.TestSpiderPipeline': 300
}
}
def parse(self, response):
pass
```
- modify pipelines.py to
```
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class TestSpiderPipeline(object):
def open_spider(self, spider):
raise Exception('error')
def process_item(self, item, spider):
return item
```
- scrapy crawl example
- echo $?
**Expected behavior:** [What you expect to happen]
return non-zero exit code
**Actual behavior:** [What actually happens]
return zero exit code
**Reproduces how often:** [What percentage of the time does it reproduce?]
100%
### Versions
Scrapy : 1.8.0
lxml : 4.3.3.0
libxml2 : 2.9.9
cssselect : 1.0.3
parsel : 1.5.1
w3lib : 1.20.0
Twisted : 19.2.0
Python : 3.7.3 (default, Mar 27 2019, 09:23:39) - [Clang 10.0.0 (clang-1000.11.45.5)]
pyOpenSSL : 19.0.0 (OpenSSL 1.1.1b 26 Feb 2019)
cryptography : 2.6.1
Platform : Darwin-18.5.0-x86_64-i386-64bit
### Additional context
I could get the expected behavior if I change `def run(self, args, opts)` in scrapy/commands/crawl.py to
```
def run(self, args, opts):
if len(args) < 1:
raise UsageError()
elif len(args) > 1:
raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported")
spname = args[0]
res = self.crawler_process.crawl(spname, **opts.spargs)
if hasattr(res, 'result') and res.result is not None and issubclass(res.result.type, Exception):
self.exitcode = 1
else:
self.crawler_process.start()
if self.crawler_process.bootstrap_failed:
self.exitcode = 1
```
original `def run(self, args, opts)`
```
def run(self, args, opts):
if len(args) < 1:
raise UsageError()
elif len(args) > 1:
raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported")
spname = args[0]
self.crawler_process.crawl(spname, **opts.spargs)
self.crawler_process.start()
if self.crawler_process.bootstrap_failed:
self.exitcode = 1
```
Is this the proper way to modify the code to achieve this purpose? If it is, could I open a PR for this issue?
--- END ISSUE ---
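The proposed check relies on a Twisted detail: when the crawl fails before the reactor starts (for example, a pipeline's `open_spider` raises), the Deferred returned by `crawl()` has already fired with a `Failure`, which is exposed on its `result` attribute. The self-contained sketch below demonstrates that mechanism with a plain Deferred instead of a real `CrawlerProcess`, so the names are illustrative only.
```python
from twisted.internet import defer


def open_spider():
    raise Exception("error")  # stands in for a pipeline's failing open_spider


d = defer.maybeDeferred(open_spider)  # fires synchronously with a Failure
already_failed = getattr(d, "result", None) is not None and issubclass(d.result.type, Exception)
exitcode = 1 if already_failed else 0
d.addErrback(lambda _: None)  # consume the Failure so Twisted does not log it at exit
print(exitcode)  # -> 1
```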
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/commands/crawl.py`
Content:
```
1 import os
2 from scrapy.commands import ScrapyCommand
3 from scrapy.utils.conf import arglist_to_dict
4 from scrapy.utils.python import without_none_values
5 from scrapy.exceptions import UsageError
6
7
8 class Command(ScrapyCommand):
9
10 requires_project = True
11
12 def syntax(self):
13 return "[options] <spider>"
14
15 def short_desc(self):
16 return "Run a spider"
17
18 def add_options(self, parser):
19 ScrapyCommand.add_options(self, parser)
20 parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
21 help="set spider argument (may be repeated)")
22 parser.add_option("-o", "--output", metavar="FILE",
23 help="dump scraped items into FILE (use - for stdout)")
24 parser.add_option("-t", "--output-format", metavar="FORMAT",
25 help="format to use for dumping items with -o")
26
27 def process_options(self, args, opts):
28 ScrapyCommand.process_options(self, args, opts)
29 try:
30 opts.spargs = arglist_to_dict(opts.spargs)
31 except ValueError:
32 raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)
33 if opts.output:
34 if opts.output == '-':
35 self.settings.set('FEED_URI', 'stdout:', priority='cmdline')
36 else:
37 self.settings.set('FEED_URI', opts.output, priority='cmdline')
38 feed_exporters = without_none_values(
39 self.settings.getwithbase('FEED_EXPORTERS'))
40 valid_output_formats = feed_exporters.keys()
41 if not opts.output_format:
42 opts.output_format = os.path.splitext(opts.output)[1].replace(".", "")
43 if opts.output_format not in valid_output_formats:
44 raise UsageError("Unrecognized output format '%s', set one"
45 " using the '-t' switch or as a file extension"
46 " from the supported list %s" % (opts.output_format,
47 tuple(valid_output_formats)))
48 self.settings.set('FEED_FORMAT', opts.output_format, priority='cmdline')
49
50 def run(self, args, opts):
51 if len(args) < 1:
52 raise UsageError()
53 elif len(args) > 1:
54 raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported")
55 spname = args[0]
56
57 self.crawler_process.crawl(spname, **opts.spargs)
58 self.crawler_process.start()
59
60 if self.crawler_process.bootstrap_failed:
61 self.exitcode = 1
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/commands/crawl.py b/scrapy/commands/crawl.py
--- a/scrapy/commands/crawl.py
+++ b/scrapy/commands/crawl.py
@@ -54,8 +54,13 @@
raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported")
spname = args[0]
- self.crawler_process.crawl(spname, **opts.spargs)
- self.crawler_process.start()
+ crawl_defer = self.crawler_process.crawl(spname, **opts.spargs)
- if self.crawler_process.bootstrap_failed:
+ if getattr(crawl_defer, 'result', None) is not None and issubclass(crawl_defer.result.type, Exception):
self.exitcode = 1
+ else:
+ self.crawler_process.start()
+
+ if self.crawler_process.bootstrap_failed or \
+ (hasattr(self.crawler_process, 'has_exception') and self.crawler_process.has_exception):
+ self.exitcode = 1
| {"golden_diff": "diff --git a/scrapy/commands/crawl.py b/scrapy/commands/crawl.py\n--- a/scrapy/commands/crawl.py\n+++ b/scrapy/commands/crawl.py\n@@ -54,8 +54,13 @@\n raise UsageError(\"running 'scrapy crawl' with more than one spider is no longer supported\")\n spname = args[0]\n \n- self.crawler_process.crawl(spname, **opts.spargs)\n- self.crawler_process.start()\n+ crawl_defer = self.crawler_process.crawl(spname, **opts.spargs)\n \n- if self.crawler_process.bootstrap_failed:\n+ if getattr(crawl_defer, 'result', None) is not None and issubclass(crawl_defer.result.type, Exception):\n self.exitcode = 1\n+ else:\n+ self.crawler_process.start()\n+\n+ if self.crawler_process.bootstrap_failed or \\\n+ (hasattr(self.crawler_process, 'has_exception') and self.crawler_process.has_exception):\n+ self.exitcode = 1\n", "issue": "Scrapy does not use a non-zero exit code when pipeline's open_spider throws the exception\n<!--\r\n\r\nThanks for taking an interest in Scrapy!\r\n\r\nIf you have a question that starts with \"How to...\", please see the Scrapy Community page: https://scrapy.org/community/.\r\nThe GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself.\r\n\r\nKeep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md\r\n\r\nThe following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs\r\n\r\n-->\r\n\r\n### Description\r\nIn our case, we execute command `scrapy crawl` in airflow task and the exit code would be used to judge this task success or failure. I agree that `scrapy crawl` ignores spider exceptions because it's unpredictable in the crawling process. \r\n\r\nBack to our case, we export data to file or database in the pipeline and we create the directory or database connection in `open_spider(self, spider)`. I think if there is an exception happens during this function, it's reasonable to propagate a non-zero exit code. it because we normally do some initialization in this function.\r\n\r\n### Steps to Reproduce\r\n\r\n- scrapy startproject test_spider\r\n- cd test_spider\r\n- scrapy genspider example example.com\r\n- modify spiders/example.py to \r\n```\r\n# -*- coding: utf-8 -*-\r\nimport scrapy\r\n\r\n\r\nclass ExampleSpider(scrapy.Spider):\r\n name = 'example'\r\n allowed_domains = ['example.com']\r\n start_urls = ['http://example.com/']\r\n\r\n custom_settings = {\r\n 'ITEM_PIPELINES': {\r\n 'test_spider.pipelines.TestSpiderPipeline': 300\r\n }\r\n }\r\n\r\n def parse(self, response):\r\n pass\r\n```\r\n- modify pipelines.py to \r\n```\r\n# -*- coding: utf-8 -*-\r\n\r\n# Define your item pipelines here\r\n#\r\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\r\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\r\n\r\n\r\nclass TestSpiderPipeline(object):\r\n\r\n def open_spider(self, spider):\r\n raise Exception('error')\r\n\r\n def process_item(self, item, spider):\r\n return item\r\n```\r\n- scrapy crawl example\r\n- echo $? 
\r\n\r\n**Expected behavior:** [What you expect to happen]\r\nreturn non-zero exit code\r\n\r\n**Actual behavior:** [What actually happens]\r\nreturn zero exit code\r\n\r\n**Reproduces how often:** [What percentage of the time does it reproduce?]\r\n100%\r\n\r\n### Versions\r\nScrapy : 1.8.0\r\nlxml : 4.3.3.0\r\nlibxml2 : 2.9.9\r\ncssselect : 1.0.3\r\nparsel : 1.5.1\r\nw3lib : 1.20.0\r\nTwisted : 19.2.0\r\nPython : 3.7.3 (default, Mar 27 2019, 09:23:39) - [Clang 10.0.0 (clang-1000.11.45.5)]\r\npyOpenSSL : 19.0.0 (OpenSSL 1.1.1b 26 Feb 2019)\r\ncryptography : 2.6.1\r\nPlatform : Darwin-18.5.0-x86_64-i386-64bit\r\n\r\n### Additional context\r\n\r\nI could get the expected behavior if I change `def run(self, args, opts)` in scrapy/commands/crawl.py to \r\n```\r\n def run(self, args, opts):\r\n if len(args) < 1:\r\n raise UsageError()\r\n elif len(args) > 1:\r\n raise UsageError(\"running 'scrapy crawl' with more than one spider is no longer supported\")\r\n spname = args[0]\r\n\r\n res = self.crawler_process.crawl(spname, **opts.spargs)\r\n\r\n if hasattr(res, 'result') and res.result is not None and issubclass(res.result.type, Exception):\r\n self.exitcode = 1\r\n else:\r\n self.crawler_process.start()\r\n\r\n if self.crawler_process.bootstrap_failed:\r\n self.exitcode = 1\r\n```\r\noriginal `def run(self, args, opts)`\r\n```\r\n def run(self, args, opts):\r\n if len(args) < 1:\r\n raise UsageError()\r\n elif len(args) > 1:\r\n raise UsageError(\"running 'scrapy crawl' with more than one spider is no longer supported\")\r\n spname = args[0]\r\n\r\n self.crawler_process.crawl(spname, **opts.spargs)\r\n self.crawler_process.start()\r\n\r\n if self.crawler_process.bootstrap_failed:\r\n self.exitcode = 1\r\n```\r\n\r\nIs it the proper way to modify the code for achieving this purpose? 
if it is, could I create a PR request for this issue?\r\n\n", "before_files": [{"content": "import os\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.conf import arglist_to_dict\nfrom scrapy.utils.python import without_none_values\nfrom scrapy.exceptions import UsageError\n\n\nclass Command(ScrapyCommand):\n\n requires_project = True\n\n def syntax(self):\n return \"[options] <spider>\"\n\n def short_desc(self):\n return \"Run a spider\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n parser.add_option(\"-a\", dest=\"spargs\", action=\"append\", default=[], metavar=\"NAME=VALUE\",\n help=\"set spider argument (may be repeated)\")\n parser.add_option(\"-o\", \"--output\", metavar=\"FILE\",\n help=\"dump scraped items into FILE (use - for stdout)\")\n parser.add_option(\"-t\", \"--output-format\", metavar=\"FORMAT\",\n help=\"format to use for dumping items with -o\")\n\n def process_options(self, args, opts):\n ScrapyCommand.process_options(self, args, opts)\n try:\n opts.spargs = arglist_to_dict(opts.spargs)\n except ValueError:\n raise UsageError(\"Invalid -a value, use -a NAME=VALUE\", print_help=False)\n if opts.output:\n if opts.output == '-':\n self.settings.set('FEED_URI', 'stdout:', priority='cmdline')\n else:\n self.settings.set('FEED_URI', opts.output, priority='cmdline')\n feed_exporters = without_none_values(\n self.settings.getwithbase('FEED_EXPORTERS'))\n valid_output_formats = feed_exporters.keys()\n if not opts.output_format:\n opts.output_format = os.path.splitext(opts.output)[1].replace(\".\", \"\")\n if opts.output_format not in valid_output_formats:\n raise UsageError(\"Unrecognized output format '%s', set one\"\n \" using the '-t' switch or as a file extension\"\n \" from the supported list %s\" % (opts.output_format,\n tuple(valid_output_formats)))\n self.settings.set('FEED_FORMAT', opts.output_format, priority='cmdline')\n\n def run(self, args, opts):\n if len(args) < 1:\n raise UsageError()\n elif len(args) > 1:\n raise UsageError(\"running 'scrapy crawl' with more than one spider is no longer supported\")\n spname = args[0]\n\n self.crawler_process.crawl(spname, **opts.spargs)\n self.crawler_process.start()\n\n if self.crawler_process.bootstrap_failed:\n self.exitcode = 1\n", "path": "scrapy/commands/crawl.py"}], "after_files": [{"content": "import os\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.conf import arglist_to_dict\nfrom scrapy.utils.python import without_none_values\nfrom scrapy.exceptions import UsageError\n\n\nclass Command(ScrapyCommand):\n\n requires_project = True\n\n def syntax(self):\n return \"[options] <spider>\"\n\n def short_desc(self):\n return \"Run a spider\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n parser.add_option(\"-a\", dest=\"spargs\", action=\"append\", default=[], metavar=\"NAME=VALUE\",\n help=\"set spider argument (may be repeated)\")\n parser.add_option(\"-o\", \"--output\", metavar=\"FILE\",\n help=\"dump scraped items into FILE (use - for stdout)\")\n parser.add_option(\"-t\", \"--output-format\", metavar=\"FORMAT\",\n help=\"format to use for dumping items with -o\")\n\n def process_options(self, args, opts):\n ScrapyCommand.process_options(self, args, opts)\n try:\n opts.spargs = arglist_to_dict(opts.spargs)\n except ValueError:\n raise UsageError(\"Invalid -a value, use -a NAME=VALUE\", print_help=False)\n if opts.output:\n if opts.output == '-':\n self.settings.set('FEED_URI', 'stdout:', priority='cmdline')\n else:\n 
self.settings.set('FEED_URI', opts.output, priority='cmdline')\n feed_exporters = without_none_values(\n self.settings.getwithbase('FEED_EXPORTERS'))\n valid_output_formats = feed_exporters.keys()\n if not opts.output_format:\n opts.output_format = os.path.splitext(opts.output)[1].replace(\".\", \"\")\n if opts.output_format not in valid_output_formats:\n raise UsageError(\"Unrecognized output format '%s', set one\"\n \" using the '-t' switch or as a file extension\"\n \" from the supported list %s\" % (opts.output_format,\n tuple(valid_output_formats)))\n self.settings.set('FEED_FORMAT', opts.output_format, priority='cmdline')\n\n def run(self, args, opts):\n if len(args) < 1:\n raise UsageError()\n elif len(args) > 1:\n raise UsageError(\"running 'scrapy crawl' with more than one spider is no longer supported\")\n spname = args[0]\n\n crawl_defer = self.crawler_process.crawl(spname, **opts.spargs)\n\n if getattr(crawl_defer, 'result', None) is not None and issubclass(crawl_defer.result.type, Exception):\n self.exitcode = 1\n else:\n self.crawler_process.start()\n\n if self.crawler_process.bootstrap_failed or \\\n (hasattr(self.crawler_process, 'has_exception') and self.crawler_process.has_exception):\n self.exitcode = 1\n", "path": "scrapy/commands/crawl.py"}]} | 2,016 | 230 |
gh_patches_debug_28893 | rasdani/github-patches | git_diff | mirumee__ariadne-387 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Argument 'code' has invalid value "ABC"
I think there is a bug when using both literal and variable values with a custom scalar.
```python
from ariadne import ScalarType
testscalar = ScalarType('TestScalar')
@testscalar.serializer
def serializer(value):
return value.upper()
@testscalar.value_parser
def value_parser(value):
if value:
return serializer(value)
@testscalar.literal_parser
def literal_parser(ast):
value = str(ast.value)
return value_parser(value)
```
If you then make the following query:
```graphql
query($code: TestScalar) {
test1: testType(code: $code) {
id
}
test2: testType(code: "ABC") {
id
}
}
```
This error is returned: Argument 'code' has invalid value "ABC"
If you don't pass variables and only use "literal" values, it works. The same is true if you only pass variables: it works fine.
If you don't set up a resolver for "testType" then no error is returned.
I'm not sure what is happening, but I think this is a bug. If not, does anyone know why this is happening?
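For context, a minimal sketch (mine, not from the issue) of how graphql-core routes the two cases: variable values reach `parse_value` as plain Python values, while inline literals reach `parse_literal` as AST nodes, so the two parsers receive different inputs:
```python
from graphql import GraphQLScalarType
from graphql.language.ast import StringValueNode


def parse_value(value):                      # called for $code supplied via variables (plain str)
    return value.upper()


def parse_literal(node, _variables=None):    # called for the inline "ABC" literal (AST node)
    if isinstance(node, StringValueNode):
        return node.value.upper()
    return None


TestScalar = GraphQLScalarType(
    name="TestScalar",
    serialize=lambda v: v.upper(),
    parse_value=parse_value,
    parse_literal=parse_literal,
)
```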
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/scalars.py`
Content:
```
1 from typing import Optional, cast
2
3 from graphql.language.ast import (
4 BooleanValueNode,
5 FloatValueNode,
6 IntValueNode,
7 StringValueNode,
8 )
9 from graphql.type import (
10 GraphQLNamedType,
11 GraphQLScalarLiteralParser,
12 GraphQLScalarSerializer,
13 GraphQLScalarType,
14 GraphQLScalarValueParser,
15 GraphQLSchema,
16 )
17 from graphql.utilities import value_from_ast_untyped
18
19 from .types import SchemaBindable
20
21
22 class ScalarType(SchemaBindable):
23 _serialize: Optional[GraphQLScalarSerializer]
24 _parse_value: Optional[GraphQLScalarValueParser]
25 _parse_literal: Optional[GraphQLScalarLiteralParser]
26
27 def __init__(
28 self,
29 name: str,
30 *,
31 serializer: GraphQLScalarSerializer = None,
32 value_parser: GraphQLScalarValueParser = None,
33 literal_parser: GraphQLScalarLiteralParser = None,
34 ) -> None:
35 self.name = name
36 self._serialize = serializer
37 self._parse_value = value_parser
38 self._parse_literal = literal_parser
39
40 def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:
41 self._serialize = f
42 return f
43
44 def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:
45 self._parse_value = f
46 if not self._parse_literal:
47 self._parse_literal = create_default_literal_parser(f)
48 return f
49
50 def set_literal_parser(
51 self, f: GraphQLScalarLiteralParser
52 ) -> GraphQLScalarLiteralParser:
53 self._parse_literal = f
54 return f
55
56 # Alias above setters for consistent decorator API
57 serializer = set_serializer
58 value_parser = set_value_parser
59 literal_parser = set_literal_parser
60
61 def bind_to_schema(self, schema: GraphQLSchema) -> None:
62 graphql_type = schema.type_map.get(self.name)
63 self.validate_graphql_type(graphql_type)
64 graphql_type = cast(GraphQLScalarType, graphql_type)
65
66 if self._serialize:
67 # See mypy bug https://github.com/python/mypy/issues/2427
68 graphql_type.serialize = self._serialize # type: ignore
69 if self._parse_value:
70 graphql_type.parse_value = self._parse_value # type: ignore
71 if self._parse_literal:
72 graphql_type.parse_literal = self._parse_literal # type: ignore
73
74 def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:
75 if not graphql_type:
76 raise ValueError("Scalar %s is not defined in the schema" % self.name)
77 if not isinstance(graphql_type, GraphQLScalarType):
78 raise ValueError(
79 "%s is defined in the schema, but it is instance of %s (expected %s)"
80 % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)
81 )
82
83
84 SCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)
85
86
87 def create_default_literal_parser(
88 value_parser: GraphQLScalarValueParser,
89 ) -> GraphQLScalarLiteralParser:
90 def default_literal_parser(ast):
91 return value_parser(value_from_ast_untyped(ast))
92
93 return default_literal_parser
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ariadne/scalars.py b/ariadne/scalars.py
--- a/ariadne/scalars.py
+++ b/ariadne/scalars.py
@@ -1,11 +1,5 @@
from typing import Optional, cast
-from graphql.language.ast import (
- BooleanValueNode,
- FloatValueNode,
- IntValueNode,
- StringValueNode,
-)
from graphql.type import (
GraphQLNamedType,
GraphQLScalarLiteralParser,
@@ -14,7 +8,6 @@
GraphQLScalarValueParser,
GraphQLSchema,
)
-from graphql.utilities import value_from_ast_untyped
from .types import SchemaBindable
@@ -43,8 +36,6 @@
def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:
self._parse_value = f
- if not self._parse_literal:
- self._parse_literal = create_default_literal_parser(f)
return f
def set_literal_parser(
@@ -79,15 +70,3 @@
"%s is defined in the schema, but it is instance of %s (expected %s)"
% (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)
)
-
-
-SCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)
-
-
-def create_default_literal_parser(
- value_parser: GraphQLScalarValueParser,
-) -> GraphQLScalarLiteralParser:
- def default_literal_parser(ast):
- return value_parser(value_from_ast_untyped(ast))
-
- return default_literal_parser
| {"golden_diff": "diff --git a/ariadne/scalars.py b/ariadne/scalars.py\n--- a/ariadne/scalars.py\n+++ b/ariadne/scalars.py\n@@ -1,11 +1,5 @@\n from typing import Optional, cast\n \n-from graphql.language.ast import (\n- BooleanValueNode,\n- FloatValueNode,\n- IntValueNode,\n- StringValueNode,\n-)\n from graphql.type import (\n GraphQLNamedType,\n GraphQLScalarLiteralParser,\n@@ -14,7 +8,6 @@\n GraphQLScalarValueParser,\n GraphQLSchema,\n )\n-from graphql.utilities import value_from_ast_untyped\n \n from .types import SchemaBindable\n \n@@ -43,8 +36,6 @@\n \n def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:\n self._parse_value = f\n- if not self._parse_literal:\n- self._parse_literal = create_default_literal_parser(f)\n return f\n \n def set_literal_parser(\n@@ -79,15 +70,3 @@\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)\n )\n-\n-\n-SCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)\n-\n-\n-def create_default_literal_parser(\n- value_parser: GraphQLScalarValueParser,\n-) -> GraphQLScalarLiteralParser:\n- def default_literal_parser(ast):\n- return value_parser(value_from_ast_untyped(ast))\n-\n- return default_literal_parser\n", "issue": "Argument 'code' has invalid value \"ABC\"\nI think there is a bug when using both literal and variable values with a custom scalar.\r\n\r\n```python\r\nfrom ariadne import ScalarType\r\n\r\ntestscalar = ScalarType('TestScalar')\r\n\r\[email protected]\r\ndef serializer(value):\r\n return value.upper()\r\n\r\n\r\[email protected]_parser\r\ndef value_parser(value):\r\n if value:\r\n return serializer(value)\r\n\r\n\r\[email protected]_parser\r\ndef literal_parser(ast):\r\n value = str(ast.value)\r\n return value_parser(value)\r\n```\r\n\r\nIf you then make the following query:\r\n```graphql\r\nquery($code: TestScalar) {\r\n test1: testType(code: $code) {\r\n id\r\n }\r\n test2: testType(code: \"ABC\") {\r\n id\r\n }\r\n}\r\n```\r\n This error is returned: Argument 'code' has invalid value \"ABC\"\r\n\r\nIf you don't pass variables and only use \"literal\" values it works. Same for if you only pass variables it works fine.\r\n\r\nIf you don't set up a resolver for \"testType\" then no error is returned.\r\n\r\nNot sure what is happening but I think this is a bug. 
If not, does anyone know why this is happening?\n", "before_files": [{"content": "from typing import Optional, cast\n\nfrom graphql.language.ast import (\n BooleanValueNode,\n FloatValueNode,\n IntValueNode,\n StringValueNode,\n)\nfrom graphql.type import (\n GraphQLNamedType,\n GraphQLScalarLiteralParser,\n GraphQLScalarSerializer,\n GraphQLScalarType,\n GraphQLScalarValueParser,\n GraphQLSchema,\n)\nfrom graphql.utilities import value_from_ast_untyped\n\nfrom .types import SchemaBindable\n\n\nclass ScalarType(SchemaBindable):\n _serialize: Optional[GraphQLScalarSerializer]\n _parse_value: Optional[GraphQLScalarValueParser]\n _parse_literal: Optional[GraphQLScalarLiteralParser]\n\n def __init__(\n self,\n name: str,\n *,\n serializer: GraphQLScalarSerializer = None,\n value_parser: GraphQLScalarValueParser = None,\n literal_parser: GraphQLScalarLiteralParser = None,\n ) -> None:\n self.name = name\n self._serialize = serializer\n self._parse_value = value_parser\n self._parse_literal = literal_parser\n\n def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:\n self._serialize = f\n return f\n\n def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:\n self._parse_value = f\n if not self._parse_literal:\n self._parse_literal = create_default_literal_parser(f)\n return f\n\n def set_literal_parser(\n self, f: GraphQLScalarLiteralParser\n ) -> GraphQLScalarLiteralParser:\n self._parse_literal = f\n return f\n\n # Alias above setters for consistent decorator API\n serializer = set_serializer\n value_parser = set_value_parser\n literal_parser = set_literal_parser\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLScalarType, graphql_type)\n\n if self._serialize:\n # See mypy bug https://github.com/python/mypy/issues/2427\n graphql_type.serialize = self._serialize # type: ignore\n if self._parse_value:\n graphql_type.parse_value = self._parse_value # type: ignore\n if self._parse_literal:\n graphql_type.parse_literal = self._parse_literal # type: ignore\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Scalar %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLScalarType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)\n )\n\n\nSCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)\n\n\ndef create_default_literal_parser(\n value_parser: GraphQLScalarValueParser,\n) -> GraphQLScalarLiteralParser:\n def default_literal_parser(ast):\n return value_parser(value_from_ast_untyped(ast))\n\n return default_literal_parser\n", "path": "ariadne/scalars.py"}], "after_files": [{"content": "from typing import Optional, cast\n\nfrom graphql.type import (\n GraphQLNamedType,\n GraphQLScalarLiteralParser,\n GraphQLScalarSerializer,\n GraphQLScalarType,\n GraphQLScalarValueParser,\n GraphQLSchema,\n)\n\nfrom .types import SchemaBindable\n\n\nclass ScalarType(SchemaBindable):\n _serialize: Optional[GraphQLScalarSerializer]\n _parse_value: Optional[GraphQLScalarValueParser]\n _parse_literal: Optional[GraphQLScalarLiteralParser]\n\n def __init__(\n self,\n name: str,\n *,\n serializer: GraphQLScalarSerializer = None,\n value_parser: GraphQLScalarValueParser = None,\n 
literal_parser: GraphQLScalarLiteralParser = None,\n ) -> None:\n self.name = name\n self._serialize = serializer\n self._parse_value = value_parser\n self._parse_literal = literal_parser\n\n def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:\n self._serialize = f\n return f\n\n def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:\n self._parse_value = f\n return f\n\n def set_literal_parser(\n self, f: GraphQLScalarLiteralParser\n ) -> GraphQLScalarLiteralParser:\n self._parse_literal = f\n return f\n\n # Alias above setters for consistent decorator API\n serializer = set_serializer\n value_parser = set_value_parser\n literal_parser = set_literal_parser\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLScalarType, graphql_type)\n\n if self._serialize:\n # See mypy bug https://github.com/python/mypy/issues/2427\n graphql_type.serialize = self._serialize # type: ignore\n if self._parse_value:\n graphql_type.parse_value = self._parse_value # type: ignore\n if self._parse_literal:\n graphql_type.parse_literal = self._parse_literal # type: ignore\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Scalar %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLScalarType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)\n )\n", "path": "ariadne/scalars.py"}]} | 1,366 | 351 |
gh_patches_debug_66140 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-1452 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Run Flake8 lint on RHEL6
Currently, flake8 is run only on RHEL7 and RHEL8 and not on RHEL6. According to [the documentation](http://flake8.pycqa.org/en/latest/#installation), it is necessary to run flake8 with the exact Python version that is used. Thus, to be sure that the syntax is OK even for the older Python version, we have to run it on RHEL6 too.
Tackled in #1251.
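A minimal sketch of one way the pin could vary per interpreter (an illustration only, not the project's actual change); the version bounds assume flake8 3.0 dropped Python 2.6 support:
```python
import sys

# Hypothetical per-interpreter pin: flake8 3.0 dropped Python 2.6 support.
if sys.version_info[:2] < (2, 7):
    linting = set(['flake8<3.0'])
else:
    linting = set(['flake8==3.3.0'])
```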
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 from setuptools import setup, find_packages
3
4 __here__ = os.path.dirname(os.path.abspath(__file__))
5
6 package_info = dict.fromkeys(["RELEASE", "COMMIT", "VERSION", "NAME"])
7
8 for name in package_info:
9 with open(os.path.join(__here__, "insights", name)) as f:
10 package_info[name] = f.read().strip()
11
12 entry_points = {
13 'console_scripts': [
14 'insights-run = insights:main',
15 'insights-info = insights.tools.query:main',
16 'gen_api = insights.tools.generate_api_config:main',
17 'insights-perf = insights.tools.perf:main',
18 'client = insights.client:run',
19 'mangle = insights.util.mangle:main'
20 ]
21 }
22
23 runtime = set([
24 'pyyaml>=3.10,<=3.13',
25 'six',
26 ])
27
28
29 def maybe_require(pkg):
30 try:
31 __import__(pkg)
32 except ImportError:
33 runtime.add(pkg)
34
35
36 maybe_require("importlib")
37 maybe_require("argparse")
38
39
40 client = set([
41 'requests',
42 'pyOpenSSL',
43 ])
44
45 develop = set([
46 'futures==3.0.5',
47 'requests==2.13.0',
48 'wheel',
49 ])
50
51 docs = set([
52 'Sphinx==1.7.9',
53 'nbsphinx==0.3.1',
54 'sphinx_rtd_theme',
55 'ipython<6',
56 'colorama',
57 ])
58
59 testing = set([
60 'coverage==4.3.4',
61 'pytest==3.0.6',
62 'pytest-cov==2.4.0',
63 'mock==2.0.0',
64 ])
65
66 linting = set([
67 'flake8==3.3.0',
68 ])
69
70 optional = set([
71 'jinja2',
72 'python-cjson',
73 'python-logstash',
74 'python-statsd',
75 'watchdog',
76 ])
77
78 if __name__ == "__main__":
79 # allows for runtime modification of rpm name
80 name = os.environ.get("INSIGHTS_CORE_NAME", package_info["NAME"])
81
82 setup(
83 name=name,
84 version=package_info["VERSION"],
85 description="Insights Core is a data collection and analysis framework",
86 long_description=open("README.rst").read(),
87 url="https://github.com/redhatinsights/insights-core",
88 author="Red Hat, Inc.",
89 author_email="[email protected]",
90 packages=find_packages(),
91 install_requires=list(runtime),
92 package_data={'': ['LICENSE']},
93 license='Apache 2.0',
94 extras_require={
95 'develop': list(runtime | develop | client | docs | linting | testing),
96 'client': list(runtime | client),
97 'optional': list(optional),
98 'docs': list(docs),
99 'linting': list(linting | client),
100 'testing': list(testing | client)
101 },
102 classifiers=[
103 'Development Status :: 5 - Production/Stable',
104 'Intended Audience :: Developers',
105 'Natural Language :: English',
106 'License :: OSI Approved :: Apache Software License',
107 'Programming Language :: Python',
108 'Programming Language :: Python :: 2.6',
109 'Programming Language :: Python :: 2.7',
110 'Programming Language :: Python :: 3.3',
111 'Programming Language :: Python :: 3.4',
112 'Programming Language :: Python :: 3.5',
113 'Programming Language :: Python :: 3.6'
114 ],
115 entry_points=entry_points,
116 include_package_data=True
117 )
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -64,7 +64,7 @@
])
linting = set([
- 'flake8==3.3.0',
+ 'flake8==2.6.2',
])
optional = set([
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -64,7 +64,7 @@\n ])\n \n linting = set([\n- 'flake8==3.3.0',\n+ 'flake8==2.6.2',\n ])\n \n optional = set([\n", "issue": "Run Flake8 lint on RHEL6\nCurrently, flake8 is run only on RHEL7 and 8 and not on RHEL6. According to [the documentation](http://flake8.pycqa.org/en/latest/#installation) it is necessary to run flake8 with the exact Python version that is used. Thus to be sure that the syntax is ok even for the older Python version, we have to run in to RHEL6 too.\r\n\r\nTackled in #1251.\n", "before_files": [{"content": "import os\nfrom setuptools import setup, find_packages\n\n__here__ = os.path.dirname(os.path.abspath(__file__))\n\npackage_info = dict.fromkeys([\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\nfor name in package_info:\n with open(os.path.join(__here__, \"insights\", name)) as f:\n package_info[name] = f.read().strip()\n\nentry_points = {\n 'console_scripts': [\n 'insights-run = insights:main',\n 'insights-info = insights.tools.query:main',\n 'gen_api = insights.tools.generate_api_config:main',\n 'insights-perf = insights.tools.perf:main',\n 'client = insights.client:run',\n 'mangle = insights.util.mangle:main'\n ]\n}\n\nruntime = set([\n 'pyyaml>=3.10,<=3.13',\n 'six',\n])\n\n\ndef maybe_require(pkg):\n try:\n __import__(pkg)\n except ImportError:\n runtime.add(pkg)\n\n\nmaybe_require(\"importlib\")\nmaybe_require(\"argparse\")\n\n\nclient = set([\n 'requests',\n 'pyOpenSSL',\n])\n\ndevelop = set([\n 'futures==3.0.5',\n 'requests==2.13.0',\n 'wheel',\n])\n\ndocs = set([\n 'Sphinx==1.7.9',\n 'nbsphinx==0.3.1',\n 'sphinx_rtd_theme',\n 'ipython<6',\n 'colorama',\n])\n\ntesting = set([\n 'coverage==4.3.4',\n 'pytest==3.0.6',\n 'pytest-cov==2.4.0',\n 'mock==2.0.0',\n])\n\nlinting = set([\n 'flake8==3.3.0',\n])\n\noptional = set([\n 'jinja2',\n 'python-cjson',\n 'python-logstash',\n 'python-statsd',\n 'watchdog',\n])\n\nif __name__ == \"__main__\":\n # allows for runtime modification of rpm name\n name = os.environ.get(\"INSIGHTS_CORE_NAME\", package_info[\"NAME\"])\n\n setup(\n name=name,\n version=package_info[\"VERSION\"],\n description=\"Insights Core is a data collection and analysis framework\",\n long_description=open(\"README.rst\").read(),\n url=\"https://github.com/redhatinsights/insights-core\",\n author=\"Red Hat, Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n install_requires=list(runtime),\n package_data={'': ['LICENSE']},\n license='Apache 2.0',\n extras_require={\n 'develop': list(runtime | develop | client | docs | linting | testing),\n 'client': list(runtime | client),\n 'optional': list(optional),\n 'docs': list(docs),\n 'linting': list(linting | client),\n 'testing': list(testing | client)\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n entry_points=entry_points,\n include_package_data=True\n )\n", "path": "setup.py"}], "after_files": [{"content": "import os\nfrom setuptools import setup, find_packages\n\n__here__ = os.path.dirname(os.path.abspath(__file__))\n\npackage_info = 
dict.fromkeys([\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\nfor name in package_info:\n with open(os.path.join(__here__, \"insights\", name)) as f:\n package_info[name] = f.read().strip()\n\nentry_points = {\n 'console_scripts': [\n 'insights-run = insights:main',\n 'insights-info = insights.tools.query:main',\n 'gen_api = insights.tools.generate_api_config:main',\n 'insights-perf = insights.tools.perf:main',\n 'client = insights.client:run',\n 'mangle = insights.util.mangle:main'\n ]\n}\n\nruntime = set([\n 'pyyaml>=3.10,<=3.13',\n 'six',\n])\n\n\ndef maybe_require(pkg):\n try:\n __import__(pkg)\n except ImportError:\n runtime.add(pkg)\n\n\nmaybe_require(\"importlib\")\nmaybe_require(\"argparse\")\n\n\nclient = set([\n 'requests',\n 'pyOpenSSL',\n])\n\ndevelop = set([\n 'futures==3.0.5',\n 'requests==2.13.0',\n 'wheel',\n])\n\ndocs = set([\n 'Sphinx==1.7.9',\n 'nbsphinx==0.3.1',\n 'sphinx_rtd_theme',\n 'ipython<6',\n 'colorama',\n])\n\ntesting = set([\n 'coverage==4.3.4',\n 'pytest==3.0.6',\n 'pytest-cov==2.4.0',\n 'mock==2.0.0',\n])\n\nlinting = set([\n 'flake8==2.6.2',\n])\n\noptional = set([\n 'jinja2',\n 'python-cjson',\n 'python-logstash',\n 'python-statsd',\n 'watchdog',\n])\n\nif __name__ == \"__main__\":\n # allows for runtime modification of rpm name\n name = os.environ.get(\"INSIGHTS_CORE_NAME\", package_info[\"NAME\"])\n\n setup(\n name=name,\n version=package_info[\"VERSION\"],\n description=\"Insights Core is a data collection and analysis framework\",\n long_description=open(\"README.rst\").read(),\n url=\"https://github.com/redhatinsights/insights-core\",\n author=\"Red Hat, Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n install_requires=list(runtime),\n package_data={'': ['LICENSE']},\n license='Apache 2.0',\n extras_require={\n 'develop': list(runtime | develop | client | docs | linting | testing),\n 'client': list(runtime | client),\n 'optional': list(optional),\n 'docs': list(docs),\n 'linting': list(linting | client),\n 'testing': list(testing | client)\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n entry_points=entry_points,\n include_package_data=True\n )\n", "path": "setup.py"}]} | 1,382 | 69 |
gh_patches_debug_32323 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-923 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Print out name of the created master pod after submitting a job via client
Currently, when submitting a job via `elasticdl train --job_name=xxx`, no master pod information is printed out and users have to guess the name of the master pod from the job name they provided.
We should print out the name of the master pod when the job has been submitted successfully.
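A minimal sketch of the intended behavior (the `get_master_pod_name()` helper is an assumed name used for illustration):
```python
def submit_and_report(client, job_name, **master_kwargs):
    """Create the master pod, then echo its name so users do not have to guess it."""
    client.create_master(**master_kwargs)
    # get_master_pod_name() is an assumed helper on the k8s client wrapper.
    print("ElasticDL job %s submitted. The master pod is: %s"
          % (job_name, client.get_master_pod_name()))
```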
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/api.py`
Content:
```
1 import os
2
3 from elasticdl.python.common import k8s_client as k8s
4 from elasticdl.python.elasticdl.image_builder import (
5 build_and_push_docker_image,
6 )
7
8 MODEL_ROOT_PATH = "/model_zoo"
9 CLUSTER_SPEC_ROOT_PATH = "/cluster_spec"
10
11
12 def train(args):
13 image_name = build_and_push_docker_image(
14 model_zoo=args.model_def,
15 base_image=args.image_base,
16 docker_image_prefix=args.docker_image_prefix,
17 extra_pypi=args.extra_pypi_index,
18 cluster_spec=args.cluster_spec,
19 )
20 container_args = [
21 "-m",
22 "elasticdl.python.master.main",
23 "--job_name",
24 args.job_name,
25 "--worker_image",
26 image_name,
27 "--model_def",
28 _model_def_in_docker(args.model_def),
29 "--cluster_spec",
30 _cluster_spec_def_in_docker(args.cluster_spec),
31 "--num_workers",
32 str(args.num_workers),
33 "--worker_resource_request",
34 args.worker_resource_request,
35 "--worker_resource_limit",
36 args.worker_resource_limit,
37 "--namespace",
38 args.namespace,
39 "--tensorboard_log_dir",
40 args.tensorboard_log_dir,
41 "--records_per_task",
42 str(args.records_per_task),
43 "--num_epochs",
44 str(args.num_epochs),
45 "--grads_to_wait",
46 str(args.grads_to_wait),
47 "--minibatch_size",
48 str(args.minibatch_size),
49 "--training_data_dir",
50 args.training_data_dir,
51 "--evaluation_data_dir",
52 args.evaluation_data_dir,
53 "--checkpoint_steps",
54 str(args.checkpoint_steps),
55 "--checkpoint_dir",
56 args.checkpoint_dir,
57 "--keep_checkpoint_max",
58 str(args.keep_checkpoint_max),
59 "--evaluation_steps",
60 str(args.evaluation_steps),
61 "--evaluation_start_delay_secs",
62 str(args.evaluation_start_delay_secs),
63 "--evaluation_throttle_secs",
64 str(args.evaluation_throttle_secs),
65 "--input_fn",
66 args.input_fn,
67 "--loss",
68 args.loss,
69 "--optimizer",
70 args.optimizer,
71 "--eval_metrics_fn",
72 args.eval_metrics_fn,
73 "--model_class",
74 args.model_class,
75 "--model_params",
76 args.model_params,
77 ]
78 container_args.extend(["--image_pull_policy", args.image_pull_policy])
79 container_args.extend(["--restart_policy", args.restart_policy])
80 container_args.extend(["--volume", args.volume])
81
82 args.master_resource_limit = (
83 args.master_resource_limit
84 if args.master_resource_limit
85 else args.master_resource_request
86 )
87
88 k8s.Client(
89 image_name=image_name,
90 namespace=args.namespace,
91 job_name=args.job_name,
92 event_callback=None,
93 cluster_spec=args.cluster_spec,
94 ).create_master(
95 resource_requests=args.master_resource_request,
96 resource_limits=args.master_resource_limit,
97 args=container_args,
98 pod_priority=args.master_pod_priority,
99 image_pull_policy=args.image_pull_policy,
100 restart_policy=args.restart_policy,
101 volume=args.volume,
102 )
103 # TODO: print dashboard url after launching the master pod
104
105
106 def evaluate(args):
107 image_name = build_and_push_docker_image(
108 model_zoo=args.model_def,
109 base_image=args.image_base,
110 docker_image_prefix=args.docker_image_prefix,
111 extra_pypi=args.extra_pypi_index,
112 cluster_spec=args.cluster_spec,
113 )
114 container_args = [
115 "-m",
116 "elasticdl.python.master.main",
117 "--job_name",
118 args.job_name,
119 "--worker_image",
120 image_name,
121 "--model_def",
122 _model_def_in_docker(args.model_def),
123 "--cluster_spec",
124 _cluster_spec_def_in_docker(args.cluster_spec),
125 "--num_workers",
126 str(args.num_workers),
127 "--worker_resource_request",
128 args.worker_resource_request,
129 "--worker_resource_limit",
130 args.worker_resource_limit,
131 "--namespace",
132 args.namespace,
133 "--records_per_task",
134 str(args.records_per_task),
135 "--minibatch_size",
136 str(args.minibatch_size),
137 "--evaluation_data_dir",
138 args.evaluation_data_dir,
139 "--checkpoint_filename_for_init",
140 args.checkpoint_filename_for_init,
141 "--input_fn",
142 args.input_fn,
143 "--eval_metrics_fn",
144 args.eval_metrics_fn,
145 "--model_class",
146 args.model_class,
147 "--model_params",
148 args.model_params,
149 ]
150 container_args.extend(["--image_pull_policy", args.image_pull_policy])
151 container_args.extend(["--restart_policy", args.restart_policy])
152 container_args.extend(["--volume", args.volume])
153
154 args.master_resource_limit = (
155 args.master_resource_limit
156 if args.master_resource_limit
157 else args.master_resource_request
158 )
159
160 k8s.Client(
161 image_name=image_name,
162 namespace=args.namespace,
163 job_name=args.job_name,
164 event_callback=None,
165 cluster_spec=args.cluster_spec,
166 ).create_master(
167 resource_requests=args.master_resource_request,
168 resource_limits=args.master_resource_limit,
169 args=container_args,
170 pod_priority=args.master_pod_priority,
171 image_pull_policy=args.image_pull_policy,
172 restart_policy=args.restart_policy,
173 volume=args.volume,
174 )
175
176
177 def _model_def_in_docker(model_def):
178 return os.path.join(MODEL_ROOT_PATH, os.path.basename(model_def))
179
180
181 def _cluster_spec_def_in_docker(cluster_spec):
182 return (
183 os.path.join(CLUSTER_SPEC_ROOT_PATH, os.path.basename(cluster_spec))
184 if cluster_spec
185 else ""
186 )
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/elasticdl/api.py b/elasticdl/python/elasticdl/api.py
--- a/elasticdl/python/elasticdl/api.py
+++ b/elasticdl/python/elasticdl/api.py
@@ -85,21 +85,7 @@
else args.master_resource_request
)
- k8s.Client(
- image_name=image_name,
- namespace=args.namespace,
- job_name=args.job_name,
- event_callback=None,
- cluster_spec=args.cluster_spec,
- ).create_master(
- resource_requests=args.master_resource_request,
- resource_limits=args.master_resource_limit,
- args=container_args,
- pod_priority=args.master_pod_priority,
- image_pull_policy=args.image_pull_policy,
- restart_policy=args.restart_policy,
- volume=args.volume,
- )
+ _submit_job(image_name, args, container_args)
# TODO: print dashboard url after launching the master pod
@@ -157,20 +143,30 @@
else args.master_resource_request
)
- k8s.Client(
+ _submit_job(image_name, args, container_args)
+
+
+def _submit_job(image_name, client_args, container_args):
+ client = k8s.Client(
image_name=image_name,
- namespace=args.namespace,
- job_name=args.job_name,
+ namespace=client_args.namespace,
+ job_name=client_args.job_name,
event_callback=None,
- cluster_spec=args.cluster_spec,
- ).create_master(
- resource_requests=args.master_resource_request,
- resource_limits=args.master_resource_limit,
+ cluster_spec=client_args.cluster_spec,
+ )
+
+ client.create_master(
+ resource_requests=client_args.master_resource_request,
+ resource_limits=client_args.master_resource_limit,
args=container_args,
- pod_priority=args.master_pod_priority,
- image_pull_policy=args.image_pull_policy,
- restart_policy=args.restart_policy,
- volume=args.volume,
+ pod_priority=client_args.master_pod_priority,
+ image_pull_policy=client_args.image_pull_policy,
+ restart_policy=client_args.restart_policy,
+ volume=client_args.volume,
+ )
+ print(
+ "ElasticDL job %s was successfully submitted. The master pod is: %s."
+ % (client_args.job_name, client.get_master_pod_name())
)
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/api.py b/elasticdl/python/elasticdl/api.py\n--- a/elasticdl/python/elasticdl/api.py\n+++ b/elasticdl/python/elasticdl/api.py\n@@ -85,21 +85,7 @@\n else args.master_resource_request\n )\n \n- k8s.Client(\n- image_name=image_name,\n- namespace=args.namespace,\n- job_name=args.job_name,\n- event_callback=None,\n- cluster_spec=args.cluster_spec,\n- ).create_master(\n- resource_requests=args.master_resource_request,\n- resource_limits=args.master_resource_limit,\n- args=container_args,\n- pod_priority=args.master_pod_priority,\n- image_pull_policy=args.image_pull_policy,\n- restart_policy=args.restart_policy,\n- volume=args.volume,\n- )\n+ _submit_job(image_name, args, container_args)\n # TODO: print dashboard url after launching the master pod\n \n \n@@ -157,20 +143,30 @@\n else args.master_resource_request\n )\n \n- k8s.Client(\n+ _submit_job(image_name, args, container_args)\n+\n+\n+def _submit_job(image_name, client_args, container_args):\n+ client = k8s.Client(\n image_name=image_name,\n- namespace=args.namespace,\n- job_name=args.job_name,\n+ namespace=client_args.namespace,\n+ job_name=client_args.job_name,\n event_callback=None,\n- cluster_spec=args.cluster_spec,\n- ).create_master(\n- resource_requests=args.master_resource_request,\n- resource_limits=args.master_resource_limit,\n+ cluster_spec=client_args.cluster_spec,\n+ )\n+\n+ client.create_master(\n+ resource_requests=client_args.master_resource_request,\n+ resource_limits=client_args.master_resource_limit,\n args=container_args,\n- pod_priority=args.master_pod_priority,\n- image_pull_policy=args.image_pull_policy,\n- restart_policy=args.restart_policy,\n- volume=args.volume,\n+ pod_priority=client_args.master_pod_priority,\n+ image_pull_policy=client_args.image_pull_policy,\n+ restart_policy=client_args.restart_policy,\n+ volume=client_args.volume,\n+ )\n+ print(\n+ \"ElasticDL job %s was successfully submitted. The master pod is: %s.\"\n+ % (client_args.job_name, client.get_master_pod_name())\n )\n", "issue": "Print out name of the created master pod after submitting a job via client\nCurrently, when submitting a job via `elasticdl train --job_name=xxx`, no master pod information will be printed out and users have to guess the name to master pod from the job name they provided. 
\r\n\r\nWe should print out the name to master pod when job has been submitted successfully.\n", "before_files": [{"content": "import os\n\nfrom elasticdl.python.common import k8s_client as k8s\nfrom elasticdl.python.elasticdl.image_builder import (\n build_and_push_docker_image,\n)\n\nMODEL_ROOT_PATH = \"/model_zoo\"\nCLUSTER_SPEC_ROOT_PATH = \"/cluster_spec\"\n\n\ndef train(args):\n image_name = build_and_push_docker_image(\n model_zoo=args.model_def,\n base_image=args.image_base,\n docker_image_prefix=args.docker_image_prefix,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n )\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--job_name\",\n args.job_name,\n \"--worker_image\",\n image_name,\n \"--model_def\",\n _model_def_in_docker(args.model_def),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n \"--num_workers\",\n str(args.num_workers),\n \"--worker_resource_request\",\n args.worker_resource_request,\n \"--worker_resource_limit\",\n args.worker_resource_limit,\n \"--namespace\",\n args.namespace,\n \"--tensorboard_log_dir\",\n args.tensorboard_log_dir,\n \"--records_per_task\",\n str(args.records_per_task),\n \"--num_epochs\",\n str(args.num_epochs),\n \"--grads_to_wait\",\n str(args.grads_to_wait),\n \"--minibatch_size\",\n str(args.minibatch_size),\n \"--training_data_dir\",\n args.training_data_dir,\n \"--evaluation_data_dir\",\n args.evaluation_data_dir,\n \"--checkpoint_steps\",\n str(args.checkpoint_steps),\n \"--checkpoint_dir\",\n args.checkpoint_dir,\n \"--keep_checkpoint_max\",\n str(args.keep_checkpoint_max),\n \"--evaluation_steps\",\n str(args.evaluation_steps),\n \"--evaluation_start_delay_secs\",\n str(args.evaluation_start_delay_secs),\n \"--evaluation_throttle_secs\",\n str(args.evaluation_throttle_secs),\n \"--input_fn\",\n args.input_fn,\n \"--loss\",\n args.loss,\n \"--optimizer\",\n args.optimizer,\n \"--eval_metrics_fn\",\n args.eval_metrics_fn,\n \"--model_class\",\n args.model_class,\n \"--model_params\",\n args.model_params,\n ]\n container_args.extend([\"--image_pull_policy\", args.image_pull_policy])\n container_args.extend([\"--restart_policy\", args.restart_policy])\n container_args.extend([\"--volume\", args.volume])\n\n args.master_resource_limit = (\n args.master_resource_limit\n if args.master_resource_limit\n else args.master_resource_request\n )\n\n k8s.Client(\n image_name=image_name,\n namespace=args.namespace,\n job_name=args.job_name,\n event_callback=None,\n cluster_spec=args.cluster_spec,\n ).create_master(\n resource_requests=args.master_resource_request,\n resource_limits=args.master_resource_limit,\n args=container_args,\n pod_priority=args.master_pod_priority,\n image_pull_policy=args.image_pull_policy,\n restart_policy=args.restart_policy,\n volume=args.volume,\n )\n # TODO: print dashboard url after launching the master pod\n\n\ndef evaluate(args):\n image_name = build_and_push_docker_image(\n model_zoo=args.model_def,\n base_image=args.image_base,\n docker_image_prefix=args.docker_image_prefix,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n )\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--job_name\",\n args.job_name,\n \"--worker_image\",\n image_name,\n \"--model_def\",\n _model_def_in_docker(args.model_def),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n \"--num_workers\",\n str(args.num_workers),\n \"--worker_resource_request\",\n args.worker_resource_request,\n 
\"--worker_resource_limit\",\n args.worker_resource_limit,\n \"--namespace\",\n args.namespace,\n \"--records_per_task\",\n str(args.records_per_task),\n \"--minibatch_size\",\n str(args.minibatch_size),\n \"--evaluation_data_dir\",\n args.evaluation_data_dir,\n \"--checkpoint_filename_for_init\",\n args.checkpoint_filename_for_init,\n \"--input_fn\",\n args.input_fn,\n \"--eval_metrics_fn\",\n args.eval_metrics_fn,\n \"--model_class\",\n args.model_class,\n \"--model_params\",\n args.model_params,\n ]\n container_args.extend([\"--image_pull_policy\", args.image_pull_policy])\n container_args.extend([\"--restart_policy\", args.restart_policy])\n container_args.extend([\"--volume\", args.volume])\n\n args.master_resource_limit = (\n args.master_resource_limit\n if args.master_resource_limit\n else args.master_resource_request\n )\n\n k8s.Client(\n image_name=image_name,\n namespace=args.namespace,\n job_name=args.job_name,\n event_callback=None,\n cluster_spec=args.cluster_spec,\n ).create_master(\n resource_requests=args.master_resource_request,\n resource_limits=args.master_resource_limit,\n args=container_args,\n pod_priority=args.master_pod_priority,\n image_pull_policy=args.image_pull_policy,\n restart_policy=args.restart_policy,\n volume=args.volume,\n )\n\n\ndef _model_def_in_docker(model_def):\n return os.path.join(MODEL_ROOT_PATH, os.path.basename(model_def))\n\n\ndef _cluster_spec_def_in_docker(cluster_spec):\n return (\n os.path.join(CLUSTER_SPEC_ROOT_PATH, os.path.basename(cluster_spec))\n if cluster_spec\n else \"\"\n )\n", "path": "elasticdl/python/elasticdl/api.py"}], "after_files": [{"content": "import os\n\nfrom elasticdl.python.common import k8s_client as k8s\nfrom elasticdl.python.elasticdl.image_builder import (\n build_and_push_docker_image,\n)\n\nMODEL_ROOT_PATH = \"/model_zoo\"\nCLUSTER_SPEC_ROOT_PATH = \"/cluster_spec\"\n\n\ndef train(args):\n image_name = build_and_push_docker_image(\n model_zoo=args.model_def,\n base_image=args.image_base,\n docker_image_prefix=args.docker_image_prefix,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n )\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--job_name\",\n args.job_name,\n \"--worker_image\",\n image_name,\n \"--model_def\",\n _model_def_in_docker(args.model_def),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n \"--num_workers\",\n str(args.num_workers),\n \"--worker_resource_request\",\n args.worker_resource_request,\n \"--worker_resource_limit\",\n args.worker_resource_limit,\n \"--namespace\",\n args.namespace,\n \"--tensorboard_log_dir\",\n args.tensorboard_log_dir,\n \"--records_per_task\",\n str(args.records_per_task),\n \"--num_epochs\",\n str(args.num_epochs),\n \"--grads_to_wait\",\n str(args.grads_to_wait),\n \"--minibatch_size\",\n str(args.minibatch_size),\n \"--training_data_dir\",\n args.training_data_dir,\n \"--evaluation_data_dir\",\n args.evaluation_data_dir,\n \"--checkpoint_steps\",\n str(args.checkpoint_steps),\n \"--checkpoint_dir\",\n args.checkpoint_dir,\n \"--keep_checkpoint_max\",\n str(args.keep_checkpoint_max),\n \"--evaluation_steps\",\n str(args.evaluation_steps),\n \"--evaluation_start_delay_secs\",\n str(args.evaluation_start_delay_secs),\n \"--evaluation_throttle_secs\",\n str(args.evaluation_throttle_secs),\n \"--input_fn\",\n args.input_fn,\n \"--loss\",\n args.loss,\n \"--optimizer\",\n args.optimizer,\n \"--eval_metrics_fn\",\n args.eval_metrics_fn,\n \"--model_class\",\n args.model_class,\n \"--model_params\",\n 
args.model_params,\n ]\n container_args.extend([\"--image_pull_policy\", args.image_pull_policy])\n container_args.extend([\"--restart_policy\", args.restart_policy])\n container_args.extend([\"--volume\", args.volume])\n\n args.master_resource_limit = (\n args.master_resource_limit\n if args.master_resource_limit\n else args.master_resource_request\n )\n\n _submit_job(image_name, args, container_args)\n # TODO: print dashboard url after launching the master pod\n\n\ndef evaluate(args):\n image_name = build_and_push_docker_image(\n model_zoo=args.model_def,\n base_image=args.image_base,\n docker_image_prefix=args.docker_image_prefix,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n )\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--job_name\",\n args.job_name,\n \"--worker_image\",\n image_name,\n \"--model_def\",\n _model_def_in_docker(args.model_def),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n \"--num_workers\",\n str(args.num_workers),\n \"--worker_resource_request\",\n args.worker_resource_request,\n \"--worker_resource_limit\",\n args.worker_resource_limit,\n \"--namespace\",\n args.namespace,\n \"--records_per_task\",\n str(args.records_per_task),\n \"--minibatch_size\",\n str(args.minibatch_size),\n \"--evaluation_data_dir\",\n args.evaluation_data_dir,\n \"--checkpoint_filename_for_init\",\n args.checkpoint_filename_for_init,\n \"--input_fn\",\n args.input_fn,\n \"--eval_metrics_fn\",\n args.eval_metrics_fn,\n \"--model_class\",\n args.model_class,\n \"--model_params\",\n args.model_params,\n ]\n container_args.extend([\"--image_pull_policy\", args.image_pull_policy])\n container_args.extend([\"--restart_policy\", args.restart_policy])\n container_args.extend([\"--volume\", args.volume])\n\n args.master_resource_limit = (\n args.master_resource_limit\n if args.master_resource_limit\n else args.master_resource_request\n )\n\n _submit_job(image_name, args, container_args)\n\n\ndef _submit_job(image_name, client_args, container_args):\n client = k8s.Client(\n image_name=image_name,\n namespace=client_args.namespace,\n job_name=client_args.job_name,\n event_callback=None,\n cluster_spec=client_args.cluster_spec,\n )\n\n client.create_master(\n resource_requests=client_args.master_resource_request,\n resource_limits=client_args.master_resource_limit,\n args=container_args,\n pod_priority=client_args.master_pod_priority,\n image_pull_policy=client_args.image_pull_policy,\n restart_policy=client_args.restart_policy,\n volume=client_args.volume,\n )\n print(\n \"ElasticDL job %s was successfully submitted. The master pod is: %s.\"\n % (client_args.job_name, client.get_master_pod_name())\n )\n\n\ndef _model_def_in_docker(model_def):\n return os.path.join(MODEL_ROOT_PATH, os.path.basename(model_def))\n\n\ndef _cluster_spec_def_in_docker(cluster_spec):\n return (\n os.path.join(CLUSTER_SPEC_ROOT_PATH, os.path.basename(cluster_spec))\n if cluster_spec\n else \"\"\n )\n", "path": "elasticdl/python/elasticdl/api.py"}]} | 1,937 | 520 |
gh_patches_debug_21365 | rasdani/github-patches | git_diff | GoogleCloudPlatform__PerfKitBenchmarker-586 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Help doesn't render with FlagValuesProxy.
Example:
```
[:~/git/PerfKitBenchmarker] [perfkit] release-0.23.0+* 1 ± python pkb.py --benchmarks redis_ycsb --machine_type n1-standard-4 --json_output redis_ycsb.json
ERROR:root:Unknown command line flag 'json_output'
Usage: pkb.py ARGS
<perfkitbenchmarker.context.FlagValuesProxy object at 0x7f51910bc050>
```
@ehankland - do you have a minute to look at this? If not assign back to me.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `perfkitbenchmarker/context.py`
Content:
```
1 # Copyright 2015 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Module for working with the current thread context."""
16
17 import threading
18
19 import gflags as flags
20
21
22 class FlagsModuleProxy(object):
23 """Class which acts as a proxy for the flags module.
24
25 When the FLAGS attribute is accessed, BENCHMARK_FLAGS will be returned
26 rather than the global FlagValues object. BENCHMARK_FLAGS is an instance
27 of FlagValuesProxy, which enables benchmarks to run with different and
28 even conflicting flags. Accessing the GLOBAL_FLAGS attribute will return
29 the global FlagValues object. Otherwise, this will behave just like the
30 flags module.
31 """
32
33 def __getattr__(self, name):
34 if name == 'FLAGS':
35 return BENCHMARK_FLAGS
36 elif name == 'GLOBAL_FLAGS':
37 return flags.FLAGS
38 return flags.__dict__[name]
39
40
41 class FlagValuesProxy(object):
42 """Class which provides the same interface as FlagValues.
43
44 By acting as a proxy for the FlagValues object (i.e. flags.FLAGS),
45 this enables benchmark specific flags. This proxy attempts to
46 use the current thread's BenchmarkSpec's FlagValues object, but
47 falls back to using flags.FLAGS if the thread has no BenchmarkSpec
48 object.
49 """
50
51 @property
52 def _thread_flag_values(self):
53 """Returns the correct FlagValues object for the current thread.
54
55 This first tries to get the BenchmarkSpec object corresponding to the
56 current thread. If there is one, it returns that spec's FlagValues
57 object. If there isn't one, it will return the global FlagValues
58 object.
59 """
60 benchmark_spec = GetThreadBenchmarkSpec()
61 if benchmark_spec:
62 return benchmark_spec.FLAGS
63 else:
64 return flags.FLAGS
65
66 def __setattr__(self, name, value):
67 self._thread_flag_values.__setattr__(name, value)
68
69 def __getattr__(self, name):
70 return self._thread_flag_values.__getattr__(name)
71
72 def __setitem__(self, key, value):
73 self._thread_flag_values.__setitem__(key, value)
74
75 def __getitem__(self, key):
76 return self._thread_flag_values.__getitem__(key)
77
78 def __call__(self, argv):
79 return self._thread_flag_values.__call__(argv)
80
81 def FlagDict(self):
82 return self._thread_flag_values.FlagDict()
83
84
85 BENCHMARK_FLAGS = FlagValuesProxy()
86
87
88 class _ThreadData(threading.local):
89 def __init__(self):
90 self.benchmark_spec = None
91
92
93 _thread_local = _ThreadData()
94
95
96 def SetThreadBenchmarkSpec(benchmark_spec):
97 """Sets the current thread's BenchmarkSpec object."""
98 _thread_local.benchmark_spec = benchmark_spec
99
100
101 def GetThreadBenchmarkSpec():
102 """Gets the current thread's BenchmarkSpec object.
103
104 If SetThreadBenchmarkSpec() has not been called in either the current thread
105 or in an ancestor, then this method will return None by default.
106 """
107 return _thread_local.benchmark_spec
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/perfkitbenchmarker/context.py b/perfkitbenchmarker/context.py
--- a/perfkitbenchmarker/context.py
+++ b/perfkitbenchmarker/context.py
@@ -63,23 +63,24 @@
else:
return flags.FLAGS
- def __setattr__(self, name, value):
- self._thread_flag_values.__setattr__(name, value)
- def __getattr__(self, name):
- return self._thread_flag_values.__getattr__(name)
-
- def __setitem__(self, key, value):
- self._thread_flag_values.__setitem__(key, value)
-
- def __getitem__(self, key):
- return self._thread_flag_values.__getitem__(key)
-
- def __call__(self, argv):
- return self._thread_flag_values.__call__(argv)
-
- def FlagDict(self):
- return self._thread_flag_values.FlagDict()
+def _AddProxyMethod(f_name):
+ """Adds a method to FlagValuesProxy that forwards to _thread_flag_values."""
+ def f(self, *args, **kwargs):
+ return getattr(self._thread_flag_values, f_name)(*args, **kwargs)
+ f.__name__ = f_name
+ f.__doc__ = 'Proxied ' + f_name
+ setattr(FlagValuesProxy, f_name, f)
+
+
+# TODO: introduce a more generic proxy.
+for _f_name in ['FlagDict', 'Reset', 'SetDefault', 'RegisteredFlags',
+ 'FlagValuesDict', '__contains__', '__iter__', '__call__',
+ '__setattr__', '__getattr__', '__setitem__', '__getitem__',
+ '__str__']:
+ _AddProxyMethod(_f_name)
+del _f_name
+del _AddProxyMethod
BENCHMARK_FLAGS = FlagValuesProxy()
| {"golden_diff": "diff --git a/perfkitbenchmarker/context.py b/perfkitbenchmarker/context.py\n--- a/perfkitbenchmarker/context.py\n+++ b/perfkitbenchmarker/context.py\n@@ -63,23 +63,24 @@\n else:\n return flags.FLAGS\n \n- def __setattr__(self, name, value):\n- self._thread_flag_values.__setattr__(name, value)\n \n- def __getattr__(self, name):\n- return self._thread_flag_values.__getattr__(name)\n-\n- def __setitem__(self, key, value):\n- self._thread_flag_values.__setitem__(key, value)\n-\n- def __getitem__(self, key):\n- return self._thread_flag_values.__getitem__(key)\n-\n- def __call__(self, argv):\n- return self._thread_flag_values.__call__(argv)\n-\n- def FlagDict(self):\n- return self._thread_flag_values.FlagDict()\n+def _AddProxyMethod(f_name):\n+ \"\"\"Adds a method to FlagValuesProxy that forwards to _thread_flag_values.\"\"\"\n+ def f(self, *args, **kwargs):\n+ return getattr(self._thread_flag_values, f_name)(*args, **kwargs)\n+ f.__name__ = f_name\n+ f.__doc__ = 'Proxied ' + f_name\n+ setattr(FlagValuesProxy, f_name, f)\n+\n+\n+# TODO: introduce a more generic proxy.\n+for _f_name in ['FlagDict', 'Reset', 'SetDefault', 'RegisteredFlags',\n+ 'FlagValuesDict', '__contains__', '__iter__', '__call__',\n+ '__setattr__', '__getattr__', '__setitem__', '__getitem__',\n+ '__str__']:\n+ _AddProxyMethod(_f_name)\n+del _f_name\n+del _AddProxyMethod\n \n \n BENCHMARK_FLAGS = FlagValuesProxy()\n", "issue": "Help doesn't render with FlagValuesProxy.\nExample:\n\n```\n[:~/git/PerfKitBenchmarker] [perfkit] release-0.23.0+* 1 \u00b1 python pkb.py --benchmarks redis_ycsb --machine_type n1-standard-4 --json_output redis_ycsb.json\nERROR:root:Unknown command line flag 'json_output'\nUsage: pkb.py ARGS\n<perfkitbenchmarker.context.FlagValuesProxy object at 0x7f51910bc050>\n```\n\n@ehankland - do you have a minute to look at this? If not assign back to me.\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for working with the current thread context.\"\"\"\n\nimport threading\n\nimport gflags as flags\n\n\nclass FlagsModuleProxy(object):\n \"\"\"Class which acts as a proxy for the flags module.\n\n When the FLAGS attribute is accessed, BENCHMARK_FLAGS will be returned\n rather than the global FlagValues object. BENCHMARK_FLAGS is an instance\n of FlagValuesProxy, which enables benchmarks to run with different and\n even conflicting flags. Accessing the GLOBAL_FLAGS attribute will return\n the global FlagValues object. Otherwise, this will behave just like the\n flags module.\n \"\"\"\n\n def __getattr__(self, name):\n if name == 'FLAGS':\n return BENCHMARK_FLAGS\n elif name == 'GLOBAL_FLAGS':\n return flags.FLAGS\n return flags.__dict__[name]\n\n\nclass FlagValuesProxy(object):\n \"\"\"Class which provides the same interface as FlagValues.\n\n By acting as a proxy for the FlagValues object (i.e. flags.FLAGS),\n this enables benchmark specific flags. 
This proxy attempts to\n use the current thread's BenchmarkSpec's FlagValues object, but\n falls back to using flags.FLAGS if the thread has no BenchmarkSpec\n object.\n \"\"\"\n\n @property\n def _thread_flag_values(self):\n \"\"\"Returns the correct FlagValues object for the current thread.\n\n This first tries to get the BenchmarkSpec object corresponding to the\n current thread. If there is one, it returns that spec's FlagValues\n object. If there isn't one, it will return the global FlagValues\n object.\n \"\"\"\n benchmark_spec = GetThreadBenchmarkSpec()\n if benchmark_spec:\n return benchmark_spec.FLAGS\n else:\n return flags.FLAGS\n\n def __setattr__(self, name, value):\n self._thread_flag_values.__setattr__(name, value)\n\n def __getattr__(self, name):\n return self._thread_flag_values.__getattr__(name)\n\n def __setitem__(self, key, value):\n self._thread_flag_values.__setitem__(key, value)\n\n def __getitem__(self, key):\n return self._thread_flag_values.__getitem__(key)\n\n def __call__(self, argv):\n return self._thread_flag_values.__call__(argv)\n\n def FlagDict(self):\n return self._thread_flag_values.FlagDict()\n\n\nBENCHMARK_FLAGS = FlagValuesProxy()\n\n\nclass _ThreadData(threading.local):\n def __init__(self):\n self.benchmark_spec = None\n\n\n_thread_local = _ThreadData()\n\n\ndef SetThreadBenchmarkSpec(benchmark_spec):\n \"\"\"Sets the current thread's BenchmarkSpec object.\"\"\"\n _thread_local.benchmark_spec = benchmark_spec\n\n\ndef GetThreadBenchmarkSpec():\n \"\"\"Gets the current thread's BenchmarkSpec object.\n\n If SetThreadBenchmarkSpec() has not been called in either the current thread\n or in an ancestor, then this method will return None by default.\n \"\"\"\n return _thread_local.benchmark_spec\n", "path": "perfkitbenchmarker/context.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for working with the current thread context.\"\"\"\n\nimport threading\n\nimport gflags as flags\n\n\nclass FlagsModuleProxy(object):\n \"\"\"Class which acts as a proxy for the flags module.\n\n When the FLAGS attribute is accessed, BENCHMARK_FLAGS will be returned\n rather than the global FlagValues object. BENCHMARK_FLAGS is an instance\n of FlagValuesProxy, which enables benchmarks to run with different and\n even conflicting flags. Accessing the GLOBAL_FLAGS attribute will return\n the global FlagValues object. Otherwise, this will behave just like the\n flags module.\n \"\"\"\n\n def __getattr__(self, name):\n if name == 'FLAGS':\n return BENCHMARK_FLAGS\n elif name == 'GLOBAL_FLAGS':\n return flags.FLAGS\n return flags.__dict__[name]\n\n\nclass FlagValuesProxy(object):\n \"\"\"Class which provides the same interface as FlagValues.\n\n By acting as a proxy for the FlagValues object (i.e. flags.FLAGS),\n this enables benchmark specific flags. 
This proxy attempts to\n use the current thread's BenchmarkSpec's FlagValues object, but\n falls back to using flags.FLAGS if the thread has no BenchmarkSpec\n object.\n \"\"\"\n\n @property\n def _thread_flag_values(self):\n \"\"\"Returns the correct FlagValues object for the current thread.\n\n This first tries to get the BenchmarkSpec object corresponding to the\n current thread. If there is one, it returns that spec's FlagValues\n object. If there isn't one, it will return the global FlagValues\n object.\n \"\"\"\n benchmark_spec = GetThreadBenchmarkSpec()\n if benchmark_spec:\n return benchmark_spec.FLAGS\n else:\n return flags.FLAGS\n\n\ndef _AddProxyMethod(f_name):\n \"\"\"Adds a method to FlagValuesProxy that forwards to _thread_flag_values.\"\"\"\n def f(self, *args, **kwargs):\n return getattr(self._thread_flag_values, f_name)(*args, **kwargs)\n f.__name__ = f_name\n f.__doc__ = 'Proxied ' + f_name\n setattr(FlagValuesProxy, f_name, f)\n\n\n# TODO: introduce a more generic proxy.\nfor _f_name in ['FlagDict', 'Reset', 'SetDefault', 'RegisteredFlags',\n 'FlagValuesDict', '__contains__', '__iter__', '__call__',\n '__setattr__', '__getattr__', '__setitem__', '__getitem__',\n '__str__']:\n _AddProxyMethod(_f_name)\ndel _f_name\ndel _AddProxyMethod\n\n\nBENCHMARK_FLAGS = FlagValuesProxy()\n\n\nclass _ThreadData(threading.local):\n def __init__(self):\n self.benchmark_spec = None\n\n\n_thread_local = _ThreadData()\n\n\ndef SetThreadBenchmarkSpec(benchmark_spec):\n \"\"\"Sets the current thread's BenchmarkSpec object.\"\"\"\n _thread_local.benchmark_spec = benchmark_spec\n\n\ndef GetThreadBenchmarkSpec():\n \"\"\"Gets the current thread's BenchmarkSpec object.\n\n If SetThreadBenchmarkSpec() has not been called in either the current thread\n or in an ancestor, then this method will return None by default.\n \"\"\"\n return _thread_local.benchmark_spec\n", "path": "perfkitbenchmarker/context.py"}]} | 1,394 | 408 |
gh_patches_debug_5764 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1353 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix pitest warning, recheck dependencies versions.
* Cookiecutter version:master
```
py27 run-test: commands[1] | /home/insspb/git/cookiecutter/.tox/py27/bin/python /snap/pycharm-professional/196/plugins/python/helpers/pycharm/_jb_pytest_runner.py --offset 10001 -- --cov=cookiecutter tests
/home/insspb/git/cookiecutter/.tox/py27/lib/python2.7/site-packages/_pytest/config/__init__.py:316: PytestConfigWarning: pytest-catchlog plugin has been merged into the core, please remove it from your requirements.
name.replace("_", "-")
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """cookiecutter distutils configuration."""
5
6 import os
7 import io
8 import sys
9
10 from setuptools import setup
11
12 version = "1.7.0"
13
14 if sys.argv[-1] == 'publish':
15 os.system('python setup.py sdist upload')
16 os.system('python setup.py bdist_wheel upload')
17 sys.exit()
18
19 if sys.argv[-1] == 'tag':
20 os.system("git tag -a %s -m 'version %s'" % (version, version))
21 os.system("git push --tags")
22 sys.exit()
23
24 with io.open('README.md', 'r', encoding='utf-8') as readme_file:
25 readme = readme_file.read()
26
27 requirements = [
28 'binaryornot>=0.2.0',
29 'jinja2>=2.7',
30 'click>=7.0',
31 'poyo>=0.1.0',
32 'jinja2-time>=0.1.0',
33 'python-slugify>=4.0.0',
34 'requests>=2.18.0',
35 'six>=1.10',
36 ]
37
38 if sys.argv[-1] == 'readme':
39 print(readme)
40 sys.exit()
41
42
43 setup(
44 name='cookiecutter',
45 version=version,
46 description=('A command-line utility that creates projects from project '
47 'templates, e.g. creating a Python package project from a '
48 'Python package project template.'),
49 long_description=readme,
50 long_description_content_type='text/markdown',
51 author='Audrey Roy',
52 author_email='[email protected]',
53 url='https://github.com/cookiecutter/cookiecutter',
54 packages=[
55 'cookiecutter',
56 ],
57 package_dir={'cookiecutter': 'cookiecutter'},
58 entry_points={
59 'console_scripts': [
60 'cookiecutter = cookiecutter.__main__:main',
61 ]
62 },
63 include_package_data=True,
64 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
65 install_requires=requirements,
66 extras_require={
67 ':python_version<"3.3"': ['whichcraft>=0.4.0'],
68 },
69 license='BSD',
70 zip_safe=False,
71 classifiers=[
72 "Development Status :: 5 - Production/Stable",
73 "Environment :: Console",
74 "Intended Audience :: Developers",
75 "Natural Language :: English",
76 "License :: OSI Approved :: BSD License",
77 "Programming Language :: Python",
78 "Programming Language :: Python :: 2",
79 "Programming Language :: Python :: 2.7",
80 "Programming Language :: Python :: 3",
81 "Programming Language :: Python :: 3.5",
82 "Programming Language :: Python :: 3.6",
83 "Programming Language :: Python :: 3.7",
84 "Programming Language :: Python :: 3.8",
85 "Programming Language :: Python :: Implementation :: CPython",
86 "Programming Language :: Python :: Implementation :: PyPy",
87 "Topic :: Software Development",
88 ],
89 keywords=(
90 'cookiecutter, Python, projects, project templates, Jinja2, '
91 'skeleton, scaffolding, project directory, setup.py, package, '
92 'packaging'
93 ),
94 )
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -25,14 +25,15 @@
readme = readme_file.read()
requirements = [
- 'binaryornot>=0.2.0',
- 'jinja2>=2.7',
- 'click>=7.0',
- 'poyo>=0.1.0',
- 'jinja2-time>=0.1.0',
+ 'binaryornot>=0.4.4',
+ 'Jinja2<=2.11.0',
+ 'click>=7.1.1',
+ 'poyo>=0.5.0',
+ 'jinja2-time>=0.2.0',
'python-slugify>=4.0.0',
- 'requests>=2.18.0',
- 'six>=1.10',
+ 'requests>=2.23.0',
+ 'six>=1.14',
+ 'MarkupSafe<2.0.0'
]
if sys.argv[-1] == 'readme':
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,14 +25,15 @@\n readme = readme_file.read()\n \n requirements = [\n- 'binaryornot>=0.2.0',\n- 'jinja2>=2.7',\n- 'click>=7.0',\n- 'poyo>=0.1.0',\n- 'jinja2-time>=0.1.0',\n+ 'binaryornot>=0.4.4',\n+ 'Jinja2<=2.11.0',\n+ 'click>=7.1.1',\n+ 'poyo>=0.5.0',\n+ 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n- 'requests>=2.18.0',\n- 'six>=1.10',\n+ 'requests>=2.23.0',\n+ 'six>=1.14',\n+ 'MarkupSafe<2.0.0'\n ]\n \n if sys.argv[-1] == 'readme':\n", "issue": "Fix pitest warning, recheck dependencies versions.\n* Cookiecutter version:master\r\n\r\n```\r\npy27 run-test: commands[1] | /home/insspb/git/cookiecutter/.tox/py27/bin/python /snap/pycharm-professional/196/plugins/python/helpers/pycharm/_jb_pytest_runner.py --offset 10001 -- --cov=cookiecutter tests\r\n/home/insspb/git/cookiecutter/.tox/py27/lib/python2.7/site-packages/_pytest/config/__init__.py:316: PytestConfigWarning: pytest-catchlog plugin has been merged into the core, please remove it from your requirements.\r\n name.replace(\"_\", \"-\")\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"cookiecutter distutils configuration.\"\"\"\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.7.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.md', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=7.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.18.0',\n 'six>=1.10',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n extras_require={\n ':python_version<\"3.3\"': ['whichcraft>=0.4.0'],\n },\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development\",\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"cookiecutter distutils configuration.\"\"\"\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.7.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.md', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2<=2.11.0',\n 'click>=7.1.1',\n 'poyo>=0.5.0',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'six>=1.14',\n 'MarkupSafe<2.0.0'\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n extras_require={\n ':python_version<\"3.3\"': ['whichcraft>=0.4.0'],\n },\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development\",\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}]} | 1,314 | 250 |
gh_patches_debug_25991 | rasdani/github-patches | git_diff | nvaccess__nvda-11605 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Chrome: "list" is reported on every line of a list in rich text editors
### Steps to reproduce:
1. Open this URL in Chrome:
`data:text/html,<div contentEditable="true" role="textbox" aria-multiline="true">Before<ul><li>a</li><li>b</li></ul>After</div>`
2. Focus the text box and ensure you are in focus mode.
3. Press control+home.
4. Read through the content line by line using the down arrow key.
### Expected behavior:
```
Before
list bullet a
bullet b
out of list After
```
### Actual behavior:
```
Before
list bullet a
list bullet b
After
```
Note: Whether you hear "bullet" depends on your symbol level; I have mine set to "all".
### System configuration:
NVDA version: next-14373,6bbe5915
NVDA Installed or portable: installed
Windows version: Windows 10 Version 1703 (OS Build 16251.0)
Name and version of other software in use when reproducing the issue: Chrome Version 62.0.3201.2 (Official Build) canary (64-bit)
### Technical info:
This happens because a contentEditable list (the `ul` tag) does not get the read-only state. Lists and list boxes both get the same role (list), but they're normally differentiated by the read-only state; a `<ul>` has read-only, whereas a `<select size="2">` doesn't. However, in this case, I can kinda understand why Chrome doesn't set read-only; after all, it does have the editable state.
I think we should probably just tweak `TextInfo.getPresentationCategory` to treat editable lists as being containers; i.e. allow for the editable state as well as the read-only state in the rule for `PRESCAT_CONTAINER`. Alternatively, we could file a bug against Chrome requesting this get fixed on their side.
P2 because this is quite annoying when dealing with rich text editors in Chrome, including the Gmail composer.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/NVDAObjects/IAccessible/chromium.py`
Content:
```
1 #NVDAObjects/IAccessible/chromium.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #This file is covered by the GNU General Public License.
4 #See the file COPYING for more details.
5 # Copyright (C) 2010-2013 NV Access Limited
6
7 """NVDAObjects for the Chromium browser project
8 """
9
10 from comtypes import COMError
11 import oleacc
12 import controlTypes
13 import IAccessibleHandler
14 from NVDAObjects.IAccessible import IAccessible
15 from virtualBuffers.gecko_ia2 import Gecko_ia2 as GeckoVBuf, Gecko_ia2_TextInfo as GeckoVBufTextInfo
16 from . import ia2Web
17
18
19 class ChromeVBufTextInfo(GeckoVBufTextInfo):
20
21 def _normalizeControlField(self, attrs):
22 attrs = super()._normalizeControlField(attrs)
23 if attrs['role'] == controlTypes.ROLE_TOGGLEBUTTON and controlTypes.STATE_CHECKABLE in attrs['states']:
24 # In Chromium, the checkable state is exposed erroneously on toggle buttons.
25 attrs['states'].discard(controlTypes.STATE_CHECKABLE)
26 return attrs
27
28
29 class ChromeVBuf(GeckoVBuf):
30 TextInfo = ChromeVBufTextInfo
31
32 def __contains__(self, obj):
33 if obj.windowHandle != self.rootNVDAObject.windowHandle:
34 return False
35 if not isinstance(obj,ia2Web.Ia2Web):
36 # #4080: Input composition NVDAObjects are the same window but not IAccessible2!
37 return False
38 accId = obj.IA2UniqueID
39 if accId == self.rootID:
40 return True
41 try:
42 self.rootNVDAObject.IAccessibleObject.accChild(accId)
43 except COMError:
44 return False
45 return not self._isNVDAObjectInApplication(obj)
46
47
48 class Document(ia2Web.Document):
49
50 def _get_treeInterceptorClass(self):
51 states = self.states
52 if controlTypes.STATE_EDITABLE not in states and controlTypes.STATE_BUSY not in states:
53 return ChromeVBuf
54 return super(Document, self).treeInterceptorClass
55
56 class ComboboxListItem(IAccessible):
57 """
58 Represents a list item inside a combo box.
59 """
60
61 def _get_focusRedirect(self):
62 # Chrome 68 and below fires focus on the active list item of combo boxes even when the combo box is collapsed.
63 # We get around this by redirecting focus back up to the combo box itself if the list inside is invisible (I.e. the combo box is collapsed).
64 if self.parent and controlTypes.STATE_INVISIBLE in self.parent.states:
65 return self.parent.parent
66
67
68 class ToggleButton(ia2Web.Ia2Web):
69
70 def _get_states(self):
71 # In Chromium, the checkable state is exposed erroneously on toggle buttons.
72 states = super().states
73 states.discard(controlTypes.STATE_CHECKABLE)
74 return states
75
76
77 def findExtraOverlayClasses(obj, clsList):
78 """Determine the most appropriate class(es) for Chromium objects.
79 This works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses} except that it never calls any other findOverlayClasses method.
80 """
81 if obj.role==controlTypes.ROLE_LISTITEM and obj.parent and obj.parent.parent and obj.parent.parent.role==controlTypes.ROLE_COMBOBOX:
82 clsList.append(ComboboxListItem)
83 elif obj.role == controlTypes.ROLE_TOGGLEBUTTON:
84 clsList.append(ToggleButton)
85 ia2Web.findExtraOverlayClasses(obj, clsList,
86 documentClass=Document)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/source/NVDAObjects/IAccessible/chromium.py b/source/NVDAObjects/IAccessible/chromium.py
--- a/source/NVDAObjects/IAccessible/chromium.py
+++ b/source/NVDAObjects/IAccessible/chromium.py
@@ -74,6 +74,22 @@
return states
+class PresentationalList(ia2Web.Ia2Web):
+ """
+ Ensures that lists like UL, DL and OL always have the readonly state.
+ A work-around for issue #7562
+ allowing us to differentiate presentational lists from interactive lists
+ (such as of size greater 1 and ARIA list boxes).
+ In firefox, this is possible by the presence of a read-only state,
+ even in a content editable.
+ """
+
+ def _get_states(self):
+ states = super().states
+ states.add(controlTypes.STATE_READONLY)
+ return states
+
+
def findExtraOverlayClasses(obj, clsList):
"""Determine the most appropriate class(es) for Chromium objects.
This works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses} except that it never calls any other findOverlayClasses method.
@@ -82,5 +98,7 @@
clsList.append(ComboboxListItem)
elif obj.role == controlTypes.ROLE_TOGGLEBUTTON:
clsList.append(ToggleButton)
+ elif obj.role == controlTypes.ROLE_LIST and obj.IA2Attributes.get('tag') in ('ul', 'dl', 'ol'):
+ clsList.append(PresentationalList)
ia2Web.findExtraOverlayClasses(obj, clsList,
documentClass=Document)
| {"golden_diff": "diff --git a/source/NVDAObjects/IAccessible/chromium.py b/source/NVDAObjects/IAccessible/chromium.py\n--- a/source/NVDAObjects/IAccessible/chromium.py\n+++ b/source/NVDAObjects/IAccessible/chromium.py\n@@ -74,6 +74,22 @@\n \t\treturn states\r\n \r\n \r\n+class PresentationalList(ia2Web.Ia2Web):\r\n+\t\"\"\"\r\n+\tEnsures that lists like UL, DL and OL always have the readonly state.\r\n+\tA work-around for issue #7562\r\n+\tallowing us to differentiate presentational lists from interactive lists\r\n+\t(such as of size greater 1 and ARIA list boxes).\r\n+\tIn firefox, this is possible by the presence of a read-only state,\r\n+\teven in a content editable.\r\n+\t\"\"\"\r\n+\r\n+\tdef _get_states(self):\r\n+\t\tstates = super().states\r\n+\t\tstates.add(controlTypes.STATE_READONLY)\r\n+\t\treturn states\r\n+\r\n+\r\n def findExtraOverlayClasses(obj, clsList):\r\n \t\"\"\"Determine the most appropriate class(es) for Chromium objects.\r\n \tThis works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses} except that it never calls any other findOverlayClasses method.\r\n@@ -82,5 +98,7 @@\n \t\tclsList.append(ComboboxListItem)\r\n \telif obj.role == controlTypes.ROLE_TOGGLEBUTTON:\r\n \t\tclsList.append(ToggleButton)\r\n+\telif obj.role == controlTypes.ROLE_LIST and obj.IA2Attributes.get('tag') in ('ul', 'dl', 'ol'):\r\n+\t\tclsList.append(PresentationalList)\r\n \tia2Web.findExtraOverlayClasses(obj, clsList,\r\n \t\tdocumentClass=Document)\n", "issue": "Chrome: \"list\" is reported on every line of a list in rich text editors\n### Steps to reproduce:\r\n1. Open this URL in Chrome:\r\n `data:text/html,<div contentEditable=\"true\" role=\"textbox\" aria-multiline=\"true\">Before<ul><li>a</li><li>b</li></ul>After</div>`\r\n2. Focus the text box and ensure you are in focus mode.\r\n3. Press control+home.\r\n4. Read through the content line by line using the down arrow key.\r\n\r\n### Expected behavior:\r\n```\r\nBefore\r\nlist bullet a\r\nbullet b\r\nout of list After\r\n```\r\n\r\n### Actual behavior:\r\n```\r\nBefore\r\nlist bullet a\r\nlist bullet b\r\nAfter\r\n```\r\n\r\nNote: Whether you hear \"bullet\" depends on your symbol level; I have mine set to \"all\".\r\n\r\n### System configuration:\r\nNVDA version: next-14373,6bbe5915\r\nNVDA Installed or portable: installed\r\nWindows version: Windows 10 Version 1703 (OS Build 16251.0)\r\nName and version of other software in use when reproducing the issue: Chrome Version 62.0.3201.2 (Official Build) canary (64-bit)\r\n\r\n### Technical info:\r\nThis happens because a contentEditable list (the `ul` tag) does not get the read-only state. Lists and list boxes both get the same role (list), but they're normally differentiated by the read-only state; a `<ul>` has read-only, whereas a `<select size=\"2\">` doesn't. However, in this case, I can kinda understand why Chrome doesn't set read-only; after all, it does have the editable state.\r\n\r\nI think we should probably just tweak `TextInfo.getPresentationCategory` to treat editable liss as being containers; i.e. allow for the editable state as well as the read-only state in the rule for `PRESCAT_CONTAINER`. 
Alternatively, we could file a bug against Chrome requesting this get fixed on their side.\r\n\r\nP2 because this is quite annoying when dealing with rich text editors in Chrome, including the Gmail composer.\n", "before_files": [{"content": "#NVDAObjects/IAccessible/chromium.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n# Copyright (C) 2010-2013 NV Access Limited\r\n\r\n\"\"\"NVDAObjects for the Chromium browser project\r\n\"\"\"\r\n\r\nfrom comtypes import COMError\r\nimport oleacc\r\nimport controlTypes\r\nimport IAccessibleHandler\r\nfrom NVDAObjects.IAccessible import IAccessible\r\nfrom virtualBuffers.gecko_ia2 import Gecko_ia2 as GeckoVBuf, Gecko_ia2_TextInfo as GeckoVBufTextInfo\r\nfrom . import ia2Web\r\n\r\n\r\nclass ChromeVBufTextInfo(GeckoVBufTextInfo):\r\n\r\n\tdef _normalizeControlField(self, attrs):\r\n\t\tattrs = super()._normalizeControlField(attrs)\r\n\t\tif attrs['role'] == controlTypes.ROLE_TOGGLEBUTTON and controlTypes.STATE_CHECKABLE in attrs['states']:\r\n\t\t\t# In Chromium, the checkable state is exposed erroneously on toggle buttons.\r\n\t\t\tattrs['states'].discard(controlTypes.STATE_CHECKABLE)\r\n\t\treturn attrs\r\n\r\n\r\nclass ChromeVBuf(GeckoVBuf):\r\n\tTextInfo = ChromeVBufTextInfo\r\n\r\n\tdef __contains__(self, obj):\r\n\t\tif obj.windowHandle != self.rootNVDAObject.windowHandle:\r\n\t\t\treturn False\r\n\t\tif not isinstance(obj,ia2Web.Ia2Web):\r\n\t\t\t# #4080: Input composition NVDAObjects are the same window but not IAccessible2!\r\n\t\t\treturn False\r\n\t\taccId = obj.IA2UniqueID\r\n\t\tif accId == self.rootID:\r\n\t\t\treturn True\r\n\t\ttry:\r\n\t\t\tself.rootNVDAObject.IAccessibleObject.accChild(accId)\r\n\t\texcept COMError:\r\n\t\t\treturn False\r\n\t\treturn not self._isNVDAObjectInApplication(obj)\r\n\r\n\r\nclass Document(ia2Web.Document):\r\n\r\n\tdef _get_treeInterceptorClass(self):\r\n\t\tstates = self.states\r\n\t\tif controlTypes.STATE_EDITABLE not in states and controlTypes.STATE_BUSY not in states:\r\n\t\t\treturn ChromeVBuf\r\n\t\treturn super(Document, self).treeInterceptorClass\r\n\r\nclass ComboboxListItem(IAccessible):\r\n\t\"\"\"\r\n\tRepresents a list item inside a combo box.\r\n\t\"\"\"\r\n\r\n\tdef _get_focusRedirect(self):\r\n\t\t# Chrome 68 and below fires focus on the active list item of combo boxes even when the combo box is collapsed.\r\n\t\t# We get around this by redirecting focus back up to the combo box itself if the list inside is invisible (I.e. 
the combo box is collapsed).\r\n\t\tif self.parent and controlTypes.STATE_INVISIBLE in self.parent.states:\r\n\t\t\treturn self.parent.parent\r\n\r\n\r\nclass ToggleButton(ia2Web.Ia2Web):\r\n\r\n\tdef _get_states(self):\r\n\t\t# In Chromium, the checkable state is exposed erroneously on toggle buttons.\r\n\t\tstates = super().states\r\n\t\tstates.discard(controlTypes.STATE_CHECKABLE)\r\n\t\treturn states\r\n\r\n\r\ndef findExtraOverlayClasses(obj, clsList):\r\n\t\"\"\"Determine the most appropriate class(es) for Chromium objects.\r\n\tThis works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses} except that it never calls any other findOverlayClasses method.\r\n\t\"\"\"\r\n\tif obj.role==controlTypes.ROLE_LISTITEM and obj.parent and obj.parent.parent and obj.parent.parent.role==controlTypes.ROLE_COMBOBOX:\r\n\t\tclsList.append(ComboboxListItem)\r\n\telif obj.role == controlTypes.ROLE_TOGGLEBUTTON:\r\n\t\tclsList.append(ToggleButton)\r\n\tia2Web.findExtraOverlayClasses(obj, clsList,\r\n\t\tdocumentClass=Document)\r\n", "path": "source/NVDAObjects/IAccessible/chromium.py"}], "after_files": [{"content": "#NVDAObjects/IAccessible/chromium.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n# Copyright (C) 2010-2013 NV Access Limited\r\n\r\n\"\"\"NVDAObjects for the Chromium browser project\r\n\"\"\"\r\n\r\nfrom comtypes import COMError\r\nimport oleacc\r\nimport controlTypes\r\nimport IAccessibleHandler\r\nfrom NVDAObjects.IAccessible import IAccessible\r\nfrom virtualBuffers.gecko_ia2 import Gecko_ia2 as GeckoVBuf, Gecko_ia2_TextInfo as GeckoVBufTextInfo\r\nfrom . import ia2Web\r\n\r\n\r\nclass ChromeVBufTextInfo(GeckoVBufTextInfo):\r\n\r\n\tdef _normalizeControlField(self, attrs):\r\n\t\tattrs = super()._normalizeControlField(attrs)\r\n\t\tif attrs['role'] == controlTypes.ROLE_TOGGLEBUTTON and controlTypes.STATE_CHECKABLE in attrs['states']:\r\n\t\t\t# In Chromium, the checkable state is exposed erroneously on toggle buttons.\r\n\t\t\tattrs['states'].discard(controlTypes.STATE_CHECKABLE)\r\n\t\treturn attrs\r\n\r\n\r\nclass ChromeVBuf(GeckoVBuf):\r\n\tTextInfo = ChromeVBufTextInfo\r\n\r\n\tdef __contains__(self, obj):\r\n\t\tif obj.windowHandle != self.rootNVDAObject.windowHandle:\r\n\t\t\treturn False\r\n\t\tif not isinstance(obj,ia2Web.Ia2Web):\r\n\t\t\t# #4080: Input composition NVDAObjects are the same window but not IAccessible2!\r\n\t\t\treturn False\r\n\t\taccId = obj.IA2UniqueID\r\n\t\tif accId == self.rootID:\r\n\t\t\treturn True\r\n\t\ttry:\r\n\t\t\tself.rootNVDAObject.IAccessibleObject.accChild(accId)\r\n\t\texcept COMError:\r\n\t\t\treturn False\r\n\t\treturn not self._isNVDAObjectInApplication(obj)\r\n\r\n\r\nclass Document(ia2Web.Document):\r\n\r\n\tdef _get_treeInterceptorClass(self):\r\n\t\tstates = self.states\r\n\t\tif controlTypes.STATE_EDITABLE not in states and controlTypes.STATE_BUSY not in states:\r\n\t\t\treturn ChromeVBuf\r\n\t\treturn super(Document, self).treeInterceptorClass\r\n\r\nclass ComboboxListItem(IAccessible):\r\n\t\"\"\"\r\n\tRepresents a list item inside a combo box.\r\n\t\"\"\"\r\n\r\n\tdef _get_focusRedirect(self):\r\n\t\t# Chrome 68 and below fires focus on the active list item of combo boxes even when the combo box is collapsed.\r\n\t\t# We get around this by redirecting focus back up to the combo box itself if the list inside is invisible (I.e. 
the combo box is collapsed).\r\n\t\tif self.parent and controlTypes.STATE_INVISIBLE in self.parent.states:\r\n\t\t\treturn self.parent.parent\r\n\r\n\r\nclass ToggleButton(ia2Web.Ia2Web):\r\n\r\n\tdef _get_states(self):\r\n\t\t# In Chromium, the checkable state is exposed erroneously on toggle buttons.\r\n\t\tstates = super().states\r\n\t\tstates.discard(controlTypes.STATE_CHECKABLE)\r\n\t\treturn states\r\n\r\n\r\nclass PresentationalList(ia2Web.Ia2Web):\r\n\t\"\"\"\r\n\tEnsures that lists like UL, DL and OL always have the readonly state.\r\n\tA work-around for issue #7562\r\n\tallowing us to differentiate presentational lists from interactive lists\r\n\t(such as of size greater 1 and ARIA list boxes).\r\n\tIn firefox, this is possible by the presence of a read-only state,\r\n\teven in a content editable.\r\n\t\"\"\"\r\n\r\n\tdef _get_states(self):\r\n\t\tstates = super().states\r\n\t\tstates.add(controlTypes.STATE_READONLY)\r\n\t\treturn states\r\n\r\n\r\ndef findExtraOverlayClasses(obj, clsList):\r\n\t\"\"\"Determine the most appropriate class(es) for Chromium objects.\r\n\tThis works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses} except that it never calls any other findOverlayClasses method.\r\n\t\"\"\"\r\n\tif obj.role==controlTypes.ROLE_LISTITEM and obj.parent and obj.parent.parent and obj.parent.parent.role==controlTypes.ROLE_COMBOBOX:\r\n\t\tclsList.append(ComboboxListItem)\r\n\telif obj.role == controlTypes.ROLE_TOGGLEBUTTON:\r\n\t\tclsList.append(ToggleButton)\r\n\telif obj.role == controlTypes.ROLE_LIST and obj.IA2Attributes.get('tag') in ('ul', 'dl', 'ol'):\r\n\t\tclsList.append(PresentationalList)\r\n\tia2Web.findExtraOverlayClasses(obj, clsList,\r\n\t\tdocumentClass=Document)\r\n", "path": "source/NVDAObjects/IAccessible/chromium.py"}]} | 1,677 | 378 |
gh_patches_debug_54061 | rasdani/github-patches | git_diff | docker__docker-py-2793 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Couldn't create secret object
I couldn't create a secret object; the problem seemed to boil down to the way that a secret was being created from the docker daemon response.
https://github.com/docker/docker-py/blob/467cacb00d8dce68aa8ff2bdacc85acecd2d1207/docker/models/secrets.py#L31-L33
Docker version 18.03.1-ce and python version 3.5 had the following error:
````
File "docker/models/secrets.py", line 10 in __repr__
return "<%s: %s'>" % (self.__class__.__name__, self.name)
File "docker/models/secrets.py", line 14 in name
return self.attrs['Spec']['Name']
KeyError: 'Spec'
````
When calling:
````
import docker
client = docker.from_env()
mySecret = client.secrets.create(name='randomName', data='platform_node_requirements.md')
````
Changing the code to the following seemed to fix it.
````
obj = self.client.api.create_secret(**kwargs)
secret = self.client.secrets.get(obj.get('ID'))
return self.prepare_model(secret)
````
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/models/secrets.py`
Content:
```
1 from ..api import APIClient
2 from .resource import Model, Collection
3
4
5 class Secret(Model):
6 """A secret."""
7 id_attribute = 'ID'
8
9 def __repr__(self):
10 return "<%s: '%s'>" % (self.__class__.__name__, self.name)
11
12 @property
13 def name(self):
14 return self.attrs['Spec']['Name']
15
16 def remove(self):
17 """
18 Remove this secret.
19
20 Raises:
21 :py:class:`docker.errors.APIError`
22 If secret failed to remove.
23 """
24 return self.client.api.remove_secret(self.id)
25
26
27 class SecretCollection(Collection):
28 """Secrets on the Docker server."""
29 model = Secret
30
31 def create(self, **kwargs):
32 obj = self.client.api.create_secret(**kwargs)
33 return self.prepare_model(obj)
34 create.__doc__ = APIClient.create_secret.__doc__
35
36 def get(self, secret_id):
37 """
38 Get a secret.
39
40 Args:
41 secret_id (str): Secret ID.
42
43 Returns:
44 (:py:class:`Secret`): The secret.
45
46 Raises:
47 :py:class:`docker.errors.NotFound`
48 If the secret does not exist.
49 :py:class:`docker.errors.APIError`
50 If the server returns an error.
51 """
52 return self.prepare_model(self.client.api.inspect_secret(secret_id))
53
54 def list(self, **kwargs):
55 """
56 List secrets. Similar to the ``docker secret ls`` command.
57
58 Args:
59 filters (dict): Server-side list filtering options.
60
61 Returns:
62 (list of :py:class:`Secret`): The secrets.
63
64 Raises:
65 :py:class:`docker.errors.APIError`
66 If the server returns an error.
67 """
68 resp = self.client.api.secrets(**kwargs)
69 return [self.prepare_model(obj) for obj in resp]
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/models/secrets.py b/docker/models/secrets.py
--- a/docker/models/secrets.py
+++ b/docker/models/secrets.py
@@ -30,6 +30,7 @@
def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs)
+ obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__
| {"golden_diff": "diff --git a/docker/models/secrets.py b/docker/models/secrets.py\n--- a/docker/models/secrets.py\n+++ b/docker/models/secrets.py\n@@ -30,6 +30,7 @@\n \n def create(self, **kwargs):\n obj = self.client.api.create_secret(**kwargs)\n+ obj.setdefault(\"Spec\", {})[\"Name\"] = kwargs.get(\"name\")\n return self.prepare_model(obj)\n create.__doc__ = APIClient.create_secret.__doc__\n", "issue": "Couldn't create secret object\nI couldn't create secret object, the problem seemed to boil down to the way that a secret was being created from the docker daemon response. \r\n\r\nhttps://github.com/docker/docker-py/blob/467cacb00d8dce68aa8ff2bdacc85acecd2d1207/docker/models/secrets.py#L31-L33\r\n\r\nDocker version 18.03.1-ce and python version 3.5 had the following error:\r\n\r\n````\r\nFile \"docker/models/secrets.py\", line 10 in __repr__\r\nreturn \"<%s: %s'>\" % (self.__class__.__name__, self.name)\r\nFile \"docker/models/secrets.py\", line 14 in name\r\nreturn self.attrs['Spec']['Name']\r\nKeyError: 'Spec'\r\n\r\n````\r\n\r\nWhen calling: \r\n\r\n````\r\nimport docker\r\n\r\nclient -docker.from_env()\r\nmySecret = client.secrets.create(name='randomName', data='platform_node_requirements.md')\r\n\r\n````\r\n\r\nChanging the code to the following seemed to fix it. \r\n````\r\nobj = self.client.api.create_secret(**kwargs)\r\nsecret = self.client.secrets.get(obj.get('ID'))\r\nreturn self.prepare_model(secret)\r\n````\r\n\r\n\r\n\n", "before_files": [{"content": "from ..api import APIClient\nfrom .resource import Model, Collection\n\n\nclass Secret(Model):\n \"\"\"A secret.\"\"\"\n id_attribute = 'ID'\n\n def __repr__(self):\n return \"<%s: '%s'>\" % (self.__class__.__name__, self.name)\n\n @property\n def name(self):\n return self.attrs['Spec']['Name']\n\n def remove(self):\n \"\"\"\n Remove this secret.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If secret failed to remove.\n \"\"\"\n return self.client.api.remove_secret(self.id)\n\n\nclass SecretCollection(Collection):\n \"\"\"Secrets on the Docker server.\"\"\"\n model = Secret\n\n def create(self, **kwargs):\n obj = self.client.api.create_secret(**kwargs)\n return self.prepare_model(obj)\n create.__doc__ = APIClient.create_secret.__doc__\n\n def get(self, secret_id):\n \"\"\"\n Get a secret.\n\n Args:\n secret_id (str): Secret ID.\n\n Returns:\n (:py:class:`Secret`): The secret.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the secret does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_secret(secret_id))\n\n def list(self, **kwargs):\n \"\"\"\n List secrets. 
Similar to the ``docker secret ls`` command.\n\n Args:\n filters (dict): Server-side list filtering options.\n\n Returns:\n (list of :py:class:`Secret`): The secrets.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.secrets(**kwargs)\n return [self.prepare_model(obj) for obj in resp]\n", "path": "docker/models/secrets.py"}], "after_files": [{"content": "from ..api import APIClient\nfrom .resource import Model, Collection\n\n\nclass Secret(Model):\n \"\"\"A secret.\"\"\"\n id_attribute = 'ID'\n\n def __repr__(self):\n return \"<%s: '%s'>\" % (self.__class__.__name__, self.name)\n\n @property\n def name(self):\n return self.attrs['Spec']['Name']\n\n def remove(self):\n \"\"\"\n Remove this secret.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If secret failed to remove.\n \"\"\"\n return self.client.api.remove_secret(self.id)\n\n\nclass SecretCollection(Collection):\n \"\"\"Secrets on the Docker server.\"\"\"\n model = Secret\n\n def create(self, **kwargs):\n obj = self.client.api.create_secret(**kwargs)\n obj.setdefault(\"Spec\", {})[\"Name\"] = kwargs.get(\"name\")\n return self.prepare_model(obj)\n create.__doc__ = APIClient.create_secret.__doc__\n\n def get(self, secret_id):\n \"\"\"\n Get a secret.\n\n Args:\n secret_id (str): Secret ID.\n\n Returns:\n (:py:class:`Secret`): The secret.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the secret does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_secret(secret_id))\n\n def list(self, **kwargs):\n \"\"\"\n List secrets. Similar to the ``docker secret ls`` command.\n\n Args:\n filters (dict): Server-side list filtering options.\n\n Returns:\n (list of :py:class:`Secret`): The secrets.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.secrets(**kwargs)\n return [self.prepare_model(obj) for obj in resp]\n", "path": "docker/models/secrets.py"}]} | 1,048 | 101 |
gh_patches_debug_29970 | rasdani/github-patches | git_diff | inventree__InvenTree-1159 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Quick view of what roles are assigned to each group

As above:
- Next to each group, show a column for each possible role
- For each cell, show which permissions are used (read / add / modify / delete)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `InvenTree/users/admin.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.utils.translation import ugettext_lazy as _
5
6 from django.contrib import admin, messages
7 from django import forms
8 from django.contrib.auth import get_user_model
9 from django.contrib.admin.widgets import FilteredSelectMultiple
10 from django.contrib.auth.models import Group
11 from django.contrib.auth.admin import UserAdmin
12 from django.utils.safestring import mark_safe
13
14 from users.models import RuleSet
15
16 User = get_user_model()
17
18
19 class RuleSetInline(admin.TabularInline):
20 """
21 Class for displaying inline RuleSet data in the Group admin page.
22 """
23
24 model = RuleSet
25 can_delete = False
26 verbose_name = 'Ruleset'
27 verbose_plural_name = 'Rulesets'
28 fields = ['name'] + [option for option in RuleSet.RULE_OPTIONS]
29 readonly_fields = ['name']
30 max_num = len(RuleSet.RULESET_CHOICES)
31 min_num = 1
32 extra = 0
33
34
35 class InvenTreeGroupAdminForm(forms.ModelForm):
36 """
37 Custom admin form for the Group model.
38
39 Adds the ability for editing user membership directly in the group admin page.
40 """
41
42 class Meta:
43 model = Group
44 exclude = []
45 fields = [
46 'name',
47 'users',
48 ]
49
50 def __init__(self, *args, **kwargs):
51 super().__init__(*args, **kwargs)
52
53 if self.instance.pk:
54 # Populate the users field with the current Group users.
55 self.fields['users'].initial = self.instance.user_set.all()
56
57 # Add the users field.
58 users = forms.ModelMultipleChoiceField(
59 queryset=User.objects.all(),
60 required=False,
61 widget=FilteredSelectMultiple('users', False),
62 label=_('Users'),
63 help_text=_('Select which users are assigned to this group')
64 )
65
66 def save_m2m(self):
67 # Add the users to the Group.
68
69 self.instance.user_set.set(self.cleaned_data['users'])
70
71 def save(self, *args, **kwargs):
72 # Default save
73 instance = super().save()
74 # Save many-to-many data
75 self.save_m2m()
76 return instance
77
78
79 class RoleGroupAdmin(admin.ModelAdmin):
80 """
81 Custom admin interface for the Group model
82 """
83
84 form = InvenTreeGroupAdminForm
85
86 inlines = [
87 RuleSetInline,
88 ]
89
90 def get_formsets_with_inlines(self, request, obj=None):
91 for inline in self.get_inline_instances(request, obj):
92 # Hide RuleSetInline in the 'Add role' view
93 if not isinstance(inline, RuleSetInline) or obj is not None:
94 yield inline.get_formset(request, obj), inline
95
96 filter_horizontal = ['permissions']
97
98 def save_model(self, request, obj, form, change):
99 """
100 This method serves two purposes:
101 - show warning message whenever the group users belong to multiple groups
102 - skip saving of the group instance model as inlines needs to be saved before.
103 """
104
105 # Get form cleaned data
106 users = form.cleaned_data['users']
107
108 # Check for users who are members of multiple groups
109 warning_message = ''
110 for user in users:
111 if user.groups.all().count() > 1:
112 warning_message += f'<br>- <b>{user.username}</b> is member of: '
113 for idx, group in enumerate(user.groups.all()):
114 warning_message += f'<b>{group.name}</b>'
115 if idx < len(user.groups.all()) - 1:
116 warning_message += ', '
117
118 # If any, display warning message when group is saved
119 if warning_message:
120 warning_message = mark_safe(_(f'The following users are members of multiple groups:'
121 f'{warning_message}'))
122 messages.add_message(request, messages.WARNING, warning_message)
123
124 def save_formset(self, request, form, formset, change):
125 # Save inline Rulesets
126 formset.save()
127 # Save Group instance and update permissions
128 form.instance.save(update_fields=['name'])
129
130
131 class InvenTreeUserAdmin(UserAdmin):
132 """
133 Custom admin page for the User model.
134
135 Hides the "permissions" view as this is now handled
136 entirely by groups and RuleSets.
137
138 (And it's confusing!)
139 """
140
141 fieldsets = (
142 (None, {'fields': ('username', 'password')}),
143 (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
144 (_('Permissions'), {
145 'fields': ('is_active', 'is_staff', 'is_superuser', 'groups'),
146 }),
147 (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
148 )
149
150
151 admin.site.unregister(Group)
152 admin.site.register(Group, RoleGroupAdmin)
153
154 admin.site.unregister(User)
155 admin.site.register(User, InvenTreeUserAdmin)
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/InvenTree/users/admin.py b/InvenTree/users/admin.py
--- a/InvenTree/users/admin.py
+++ b/InvenTree/users/admin.py
@@ -87,6 +87,64 @@
RuleSetInline,
]
+ list_display = ('name', 'admin', 'part', 'stock', 'build', 'purchase_order', 'sales_order')
+
+ def get_rule_set(self, obj, rule_set_type):
+ ''' Return list of permissions for the given ruleset '''
+
+ # Get all rulesets associated to object
+ rule_sets = RuleSet.objects.filter(group=obj.pk)
+
+ # Select ruleset based on type
+ for rule_set in rule_sets:
+ if rule_set.name == rule_set_type:
+ break
+
+ def append_permission_level(permission_level, next_level):
+ if not permission_level:
+ return next_level
+
+ if permission_level[:-1].endswith('|'):
+ permission_level += next_level
+ else:
+ permission_level += ' | ' + next_level
+
+ return permission_level
+
+ permission_level = ''
+
+ if rule_set.can_view:
+ permission_level = append_permission_level(permission_level, 'V')
+
+ if rule_set.can_add:
+ permission_level = append_permission_level(permission_level, 'A')
+
+ if rule_set.can_change:
+ permission_level = append_permission_level(permission_level, 'C')
+
+ if rule_set.can_delete:
+ permission_level = append_permission_level(permission_level, 'D')
+
+ return permission_level
+
+ def admin(self, obj):
+ return self.get_rule_set(obj, 'admin')
+
+ def part(self, obj):
+ return self.get_rule_set(obj, 'part')
+
+ def stock(self, obj):
+ return self.get_rule_set(obj, 'stock')
+
+ def build(self, obj):
+ return self.get_rule_set(obj, 'build')
+
+ def purchase_order(self, obj):
+ return self.get_rule_set(obj, 'purchase_order')
+
+ def sales_order(self, obj):
+ return self.get_rule_set(obj, 'sales_order')
+
def get_formsets_with_inlines(self, request, obj=None):
for inline in self.get_inline_instances(request, obj):
# Hide RuleSetInline in the 'Add role' view
| {"golden_diff": "diff --git a/InvenTree/users/admin.py b/InvenTree/users/admin.py\n--- a/InvenTree/users/admin.py\n+++ b/InvenTree/users/admin.py\n@@ -87,6 +87,64 @@\n RuleSetInline,\n ]\n \n+ list_display = ('name', 'admin', 'part', 'stock', 'build', 'purchase_order', 'sales_order')\n+\n+ def get_rule_set(self, obj, rule_set_type):\n+ ''' Return list of permissions for the given ruleset '''\n+\n+ # Get all rulesets associated to object\n+ rule_sets = RuleSet.objects.filter(group=obj.pk)\n+\n+ # Select ruleset based on type\n+ for rule_set in rule_sets:\n+ if rule_set.name == rule_set_type:\n+ break\n+\n+ def append_permission_level(permission_level, next_level):\n+ if not permission_level:\n+ return next_level\n+\n+ if permission_level[:-1].endswith('|'):\n+ permission_level += next_level\n+ else:\n+ permission_level += ' | ' + next_level\n+\n+ return permission_level\n+\n+ permission_level = ''\n+\n+ if rule_set.can_view:\n+ permission_level = append_permission_level(permission_level, 'V')\n+\n+ if rule_set.can_add:\n+ permission_level = append_permission_level(permission_level, 'A')\n+\n+ if rule_set.can_change:\n+ permission_level = append_permission_level(permission_level, 'C')\n+\n+ if rule_set.can_delete:\n+ permission_level = append_permission_level(permission_level, 'D')\n+ \n+ return permission_level\n+\n+ def admin(self, obj):\n+ return self.get_rule_set(obj, 'admin')\n+\n+ def part(self, obj):\n+ return self.get_rule_set(obj, 'part')\n+\n+ def stock(self, obj):\n+ return self.get_rule_set(obj, 'stock')\n+\n+ def build(self, obj):\n+ return self.get_rule_set(obj, 'build')\n+\n+ def purchase_order(self, obj):\n+ return self.get_rule_set(obj, 'purchase_order')\n+\n+ def sales_order(self, obj):\n+ return self.get_rule_set(obj, 'sales_order')\n+\n def get_formsets_with_inlines(self, request, obj=None):\n for inline in self.get_inline_instances(request, obj):\n # Hide RuleSetInline in the 'Add role' view\n", "issue": "Quick view of what roles are assigned to each group\n\r\n\r\nAs above:\r\n\r\n- Next to each group, show a column for each possible role\r\n- For each cell, show which permissions are used (read / add / modify / delete)\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django.contrib import admin, messages\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.utils.safestring import mark_safe\n\nfrom users.models import RuleSet\n\nUser = get_user_model()\n\n\nclass RuleSetInline(admin.TabularInline):\n \"\"\"\n Class for displaying inline RuleSet data in the Group admin page.\n \"\"\"\n\n model = RuleSet\n can_delete = False\n verbose_name = 'Ruleset'\n verbose_plural_name = 'Rulesets'\n fields = ['name'] + [option for option in RuleSet.RULE_OPTIONS]\n readonly_fields = ['name']\n max_num = len(RuleSet.RULESET_CHOICES)\n min_num = 1\n extra = 0\n\n\nclass InvenTreeGroupAdminForm(forms.ModelForm):\n \"\"\"\n Custom admin form for the Group model.\n\n Adds the ability for editing user membership directly in the group admin page.\n \"\"\"\n\n class Meta:\n model = Group\n exclude = []\n fields = [\n 'name',\n 'users',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if self.instance.pk:\n # Populate the users field with the current 
Group users.\n self.fields['users'].initial = self.instance.user_set.all()\n\n # Add the users field.\n users = forms.ModelMultipleChoiceField(\n queryset=User.objects.all(),\n required=False,\n widget=FilteredSelectMultiple('users', False),\n label=_('Users'),\n help_text=_('Select which users are assigned to this group')\n )\n\n def save_m2m(self):\n # Add the users to the Group.\n\n self.instance.user_set.set(self.cleaned_data['users'])\n\n def save(self, *args, **kwargs):\n # Default save\n instance = super().save()\n # Save many-to-many data\n self.save_m2m()\n return instance\n\n\nclass RoleGroupAdmin(admin.ModelAdmin):\n \"\"\"\n Custom admin interface for the Group model\n \"\"\"\n\n form = InvenTreeGroupAdminForm\n\n inlines = [\n RuleSetInline,\n ]\n\n def get_formsets_with_inlines(self, request, obj=None):\n for inline in self.get_inline_instances(request, obj):\n # Hide RuleSetInline in the 'Add role' view\n if not isinstance(inline, RuleSetInline) or obj is not None:\n yield inline.get_formset(request, obj), inline\n\n filter_horizontal = ['permissions']\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n This method serves two purposes:\n - show warning message whenever the group users belong to multiple groups\n - skip saving of the group instance model as inlines needs to be saved before.\n \"\"\"\n\n # Get form cleaned data\n users = form.cleaned_data['users']\n\n # Check for users who are members of multiple groups\n warning_message = ''\n for user in users:\n if user.groups.all().count() > 1:\n warning_message += f'<br>- <b>{user.username}</b> is member of: '\n for idx, group in enumerate(user.groups.all()):\n warning_message += f'<b>{group.name}</b>'\n if idx < len(user.groups.all()) - 1:\n warning_message += ', '\n\n # If any, display warning message when group is saved\n if warning_message:\n warning_message = mark_safe(_(f'The following users are members of multiple groups:'\n f'{warning_message}'))\n messages.add_message(request, messages.WARNING, warning_message)\n\n def save_formset(self, request, form, formset, change):\n # Save inline Rulesets\n formset.save()\n # Save Group instance and update permissions\n form.instance.save(update_fields=['name'])\n\n\nclass InvenTreeUserAdmin(UserAdmin):\n \"\"\"\n Custom admin page for the User model.\n\n Hides the \"permissions\" view as this is now handled\n entirely by groups and RuleSets.\n\n (And it's confusing!)\n \"\"\"\n\n fieldsets = (\n (None, {'fields': ('username', 'password')}),\n (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),\n (_('Permissions'), {\n 'fields': ('is_active', 'is_staff', 'is_superuser', 'groups'),\n }),\n (_('Important dates'), {'fields': ('last_login', 'date_joined')}),\n )\n\n\nadmin.site.unregister(Group)\nadmin.site.register(Group, RoleGroupAdmin)\n\nadmin.site.unregister(User)\nadmin.site.register(User, InvenTreeUserAdmin)\n", "path": "InvenTree/users/admin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django.contrib import admin, messages\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.utils.safestring import mark_safe\n\nfrom users.models import RuleSet\n\nUser = get_user_model()\n\n\nclass RuleSetInline(admin.TabularInline):\n \"\"\"\n 
Class for displaying inline RuleSet data in the Group admin page.\n \"\"\"\n\n model = RuleSet\n can_delete = False\n verbose_name = 'Ruleset'\n verbose_plural_name = 'Rulesets'\n fields = ['name'] + [option for option in RuleSet.RULE_OPTIONS]\n readonly_fields = ['name']\n max_num = len(RuleSet.RULESET_CHOICES)\n min_num = 1\n extra = 0\n\n\nclass InvenTreeGroupAdminForm(forms.ModelForm):\n \"\"\"\n Custom admin form for the Group model.\n\n Adds the ability for editing user membership directly in the group admin page.\n \"\"\"\n\n class Meta:\n model = Group\n exclude = []\n fields = [\n 'name',\n 'users',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if self.instance.pk:\n # Populate the users field with the current Group users.\n self.fields['users'].initial = self.instance.user_set.all()\n\n # Add the users field.\n users = forms.ModelMultipleChoiceField(\n queryset=User.objects.all(),\n required=False,\n widget=FilteredSelectMultiple('users', False),\n label=_('Users'),\n help_text=_('Select which users are assigned to this group')\n )\n\n def save_m2m(self):\n # Add the users to the Group.\n\n self.instance.user_set.set(self.cleaned_data['users'])\n\n def save(self, *args, **kwargs):\n # Default save\n instance = super().save()\n # Save many-to-many data\n self.save_m2m()\n return instance\n\n\nclass RoleGroupAdmin(admin.ModelAdmin):\n \"\"\"\n Custom admin interface for the Group model\n \"\"\"\n\n form = InvenTreeGroupAdminForm\n\n inlines = [\n RuleSetInline,\n ]\n\n list_display = ('name', 'admin', 'part', 'stock', 'build', 'purchase_order', 'sales_order')\n\n def get_rule_set(self, obj, rule_set_type):\n ''' Return list of permissions for the given ruleset '''\n\n # Get all rulesets associated to object\n rule_sets = RuleSet.objects.filter(group=obj.pk)\n\n # Select ruleset based on type\n for rule_set in rule_sets:\n if rule_set.name == rule_set_type:\n break\n\n def append_permission_level(permission_level, next_level):\n if not permission_level:\n return next_level\n\n if permission_level[:-1].endswith('|'):\n permission_level += next_level\n else:\n permission_level += ' | ' + next_level\n\n return permission_level\n\n permission_level = ''\n\n if rule_set.can_view:\n permission_level = append_permission_level(permission_level, 'V')\n\n if rule_set.can_add:\n permission_level = append_permission_level(permission_level, 'A')\n\n if rule_set.can_change:\n permission_level = append_permission_level(permission_level, 'C')\n\n if rule_set.can_delete:\n permission_level = append_permission_level(permission_level, 'D')\n \n return permission_level\n\n def admin(self, obj):\n return self.get_rule_set(obj, 'admin')\n\n def part(self, obj):\n return self.get_rule_set(obj, 'part')\n\n def stock(self, obj):\n return self.get_rule_set(obj, 'stock')\n\n def build(self, obj):\n return self.get_rule_set(obj, 'build')\n\n def purchase_order(self, obj):\n return self.get_rule_set(obj, 'purchase_order')\n\n def sales_order(self, obj):\n return self.get_rule_set(obj, 'sales_order')\n\n def get_formsets_with_inlines(self, request, obj=None):\n for inline in self.get_inline_instances(request, obj):\n # Hide RuleSetInline in the 'Add role' view\n if not isinstance(inline, RuleSetInline) or obj is not None:\n yield inline.get_formset(request, obj), inline\n\n filter_horizontal = ['permissions']\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n This method serves two purposes:\n - show warning message whenever the group users belong to multiple 
groups\n - skip saving of the group instance model as inlines needs to be saved before.\n \"\"\"\n\n # Get form cleaned data\n users = form.cleaned_data['users']\n\n # Check for users who are members of multiple groups\n warning_message = ''\n for user in users:\n if user.groups.all().count() > 1:\n warning_message += f'<br>- <b>{user.username}</b> is member of: '\n for idx, group in enumerate(user.groups.all()):\n warning_message += f'<b>{group.name}</b>'\n if idx < len(user.groups.all()) - 1:\n warning_message += ', '\n\n # If any, display warning message when group is saved\n if warning_message:\n warning_message = mark_safe(_(f'The following users are members of multiple groups:'\n f'{warning_message}'))\n messages.add_message(request, messages.WARNING, warning_message)\n\n def save_formset(self, request, form, formset, change):\n # Save inline Rulesets\n formset.save()\n # Save Group instance and update permissions\n form.instance.save(update_fields=['name'])\n\n\nclass InvenTreeUserAdmin(UserAdmin):\n \"\"\"\n Custom admin page for the User model.\n\n Hides the \"permissions\" view as this is now handled\n entirely by groups and RuleSets.\n\n (And it's confusing!)\n \"\"\"\n\n fieldsets = (\n (None, {'fields': ('username', 'password')}),\n (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),\n (_('Permissions'), {\n 'fields': ('is_active', 'is_staff', 'is_superuser', 'groups'),\n }),\n (_('Important dates'), {'fields': ('last_login', 'date_joined')}),\n )\n\n\nadmin.site.unregister(Group)\nadmin.site.register(Group, RoleGroupAdmin)\n\nadmin.site.unregister(User)\nadmin.site.register(User, InvenTreeUserAdmin)\n", "path": "InvenTree/users/admin.py"}]} | 1,781 | 526 |
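The golden diff in this record summarizes each ruleset as a compact "V | A | C | D" string for the Django admin list view. The sketch below isolates that formatting step so it can be exercised on its own; the `Rule` namedtuple and sample values are hypothetical stand-ins for InvenTree's `RuleSet` model, and the helper is a simplified variant of the patch's `append_permission_level` logic, not the patch code itself.

```python
from collections import namedtuple

# Hypothetical stand-in for users.models.RuleSet; only the four flags matter here.
Rule = namedtuple("Rule", ["can_view", "can_add", "can_change", "can_delete"])


def permission_summary(rule):
    """Render a ruleset's flags as the 'V | A | C | D' style string used in list_display."""
    labels = [
        ("V", rule.can_view),
        ("A", rule.can_add),
        ("C", rule.can_change),
        ("D", rule.can_delete),
    ]
    return " | ".join(label for label, enabled in labels if enabled)


# Example: view/change/delete without add renders as "V | C | D".
print(permission_summary(Rule(True, False, True, True)))
```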
gh_patches_debug_25148 | rasdani/github-patches | git_diff | GPflow__GPflow-1536 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check deps on CI
`pip install gpflow` currently installs dependencies (setuptools, scipy) with versions that are incompatible with the tensorflow version installed.
This ticket isn't to fix the dependencies, per se, but suggests adding a `pip check -vvv` stage to CI, so that such problems are caught at PR stage.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # pylint: skip-file
5
6 import os
7 import sys
8
9 from setuptools import find_packages, setup
10
11
12 # Dependencies of GPflow
13 requirements = [
14 "numpy>=1.10.0",
15 "scipy>=0.18.0",
16 "multipledispatch>=0.6",
17 "tabulate",
18 "typing_extensions",
19 "cloudpickle==1.3.0", # temporary workaround for tensorflow/probability#991
20 ]
21
22 if sys.version_info < (3, 7):
23 # became part of stdlib in python 3.7
24 requirements.append("dataclasses")
25
26 # We do not want to install tensorflow in the readthedocs environment, where we
27 # use autodoc_mock_imports instead. Hence we use this flag to decide whether or
28 # not to append tensorflow and tensorflow_probability to the requirements:
29 if os.environ.get("READTHEDOCS") != "True":
30 requirements.extend(["tensorflow>=2.1.0,<2.3", "tensorflow-probability>=0.9,<0.11"])
31
32
33 def read_file(filename):
34 with open(filename, encoding="utf-8") as f:
35 return f.read().strip()
36
37
38 version = read_file("VERSION")
39 readme_text = read_file("README.md")
40
41 packages = find_packages(".", exclude=["tests"])
42
43 setup(
44 name="gpflow",
45 version=version,
46 author="James Hensman, Alex Matthews",
47 author_email="[email protected]",
48 description="Gaussian process methods in TensorFlow",
49 long_description=readme_text,
50 long_description_content_type="text/markdown",
51 license="Apache License 2.0",
52 keywords="machine-learning gaussian-processes kernels tensorflow",
53 url="https://www.gpflow.org",
54 project_urls={
55 "Source on GitHub": "https://github.com/GPflow/GPflow",
56 "Documentation": "https://gpflow.readthedocs.io",
57 },
58 packages=packages,
59 include_package_data=True,
60 install_requires=requirements,
61 extras_require={"ImageToTensorBoard": ["matplotlib"]},
62 python_requires=">=3.6",
63 classifiers=[
64 "License :: OSI Approved :: Apache Software License",
65 "Natural Language :: English",
66 "Operating System :: MacOS :: MacOS X",
67 "Operating System :: Microsoft :: Windows",
68 "Operating System :: POSIX :: Linux",
69 "Programming Language :: Python :: 3.6",
70 "Topic :: Scientific/Engineering :: Artificial Intelligence",
71 ],
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,11 +12,10 @@
# Dependencies of GPflow
requirements = [
"numpy>=1.10.0",
- "scipy>=0.18.0",
+ "scipy>=0.18.0,==1.4.1", # pinned to ==1.4.1 to satisfy tensorflow requirements
"multipledispatch>=0.6",
"tabulate",
"typing_extensions",
- "cloudpickle==1.3.0", # temporary workaround for tensorflow/probability#991
]
if sys.version_info < (3, 7):
@@ -27,7 +26,18 @@
# use autodoc_mock_imports instead. Hence we use this flag to decide whether or
# not to append tensorflow and tensorflow_probability to the requirements:
if os.environ.get("READTHEDOCS") != "True":
- requirements.extend(["tensorflow>=2.1.0,<2.3", "tensorflow-probability>=0.9,<0.11"])
+ requirements.extend(
+ [
+ # tensorflow>=2.3 not compatible with tensorflow-probability<0.11
+ "tensorflow>=2.1.0,<2.3",
+ # tensorflow-probability==0.10.0 doesn't install correctly
+ # https://github.com/tensorflow/probability/issues/991
+ #
+ # gpflow uses private functionality not present in tensorflow-probability~=0.11
+ "tensorflow-probability>=0.9,<0.11,!=0.10.0",
+ "setuptools>=41.0.0", # to satisfy dependency constraints
+ ]
+ )
def read_file(filename):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,11 +12,10 @@\n # Dependencies of GPflow\n requirements = [\n \"numpy>=1.10.0\",\n- \"scipy>=0.18.0\",\n+ \"scipy>=0.18.0,==1.4.1\", # pinned to ==1.4.1 to satisfy tensorflow requirements\n \"multipledispatch>=0.6\",\n \"tabulate\",\n \"typing_extensions\",\n- \"cloudpickle==1.3.0\", # temporary workaround for tensorflow/probability#991\n ]\n \n if sys.version_info < (3, 7):\n@@ -27,7 +26,18 @@\n # use autodoc_mock_imports instead. Hence we use this flag to decide whether or\n # not to append tensorflow and tensorflow_probability to the requirements:\n if os.environ.get(\"READTHEDOCS\") != \"True\":\n- requirements.extend([\"tensorflow>=2.1.0,<2.3\", \"tensorflow-probability>=0.9,<0.11\"])\n+ requirements.extend(\n+ [\n+ # tensorflow>=2.3 not compatible with tensorflow-probability<0.11\n+ \"tensorflow>=2.1.0,<2.3\",\n+ # tensorflow-probability==0.10.0 doesn't install correctly\n+ # https://github.com/tensorflow/probability/issues/991\n+ #\n+ # gpflow uses private functionality not present in tensorflow-probability~=0.11\n+ \"tensorflow-probability>=0.9,<0.11,!=0.10.0\",\n+ \"setuptools>=41.0.0\", # to satisfy dependency constraints\n+ ]\n+ )\n \n \n def read_file(filename):\n", "issue": "Check deps on CI\n`pip install gpflow` currently installs dependencies (setuptools, scipy) with versions that are incompatible with the tensorflow version installed.\r\n\r\nThis ticket isn't to fix the dependencies, per se, but suggests adding a `pip check -vvv` stage to CI, so that such problems are caught at PR stage.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# pylint: skip-file\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\n\n# Dependencies of GPflow\nrequirements = [\n \"numpy>=1.10.0\",\n \"scipy>=0.18.0\",\n \"multipledispatch>=0.6\",\n \"tabulate\",\n \"typing_extensions\",\n \"cloudpickle==1.3.0\", # temporary workaround for tensorflow/probability#991\n]\n\nif sys.version_info < (3, 7):\n # became part of stdlib in python 3.7\n requirements.append(\"dataclasses\")\n\n# We do not want to install tensorflow in the readthedocs environment, where we\n# use autodoc_mock_imports instead. 
Hence we use this flag to decide whether or\n# not to append tensorflow and tensorflow_probability to the requirements:\nif os.environ.get(\"READTHEDOCS\") != \"True\":\n requirements.extend([\"tensorflow>=2.1.0,<2.3\", \"tensorflow-probability>=0.9,<0.11\"])\n\n\ndef read_file(filename):\n with open(filename, encoding=\"utf-8\") as f:\n return f.read().strip()\n\n\nversion = read_file(\"VERSION\")\nreadme_text = read_file(\"README.md\")\n\npackages = find_packages(\".\", exclude=[\"tests\"])\n\nsetup(\n name=\"gpflow\",\n version=version,\n author=\"James Hensman, Alex Matthews\",\n author_email=\"[email protected]\",\n description=\"Gaussian process methods in TensorFlow\",\n long_description=readme_text,\n long_description_content_type=\"text/markdown\",\n license=\"Apache License 2.0\",\n keywords=\"machine-learning gaussian-processes kernels tensorflow\",\n url=\"https://www.gpflow.org\",\n project_urls={\n \"Source on GitHub\": \"https://github.com/GPflow/GPflow\",\n \"Documentation\": \"https://gpflow.readthedocs.io\",\n },\n packages=packages,\n include_package_data=True,\n install_requires=requirements,\n extras_require={\"ImageToTensorBoard\": [\"matplotlib\"]},\n python_requires=\">=3.6\",\n classifiers=[\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# pylint: skip-file\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\n\n# Dependencies of GPflow\nrequirements = [\n \"numpy>=1.10.0\",\n \"scipy>=0.18.0,==1.4.1\", # pinned to ==1.4.1 to satisfy tensorflow requirements\n \"multipledispatch>=0.6\",\n \"tabulate\",\n \"typing_extensions\",\n]\n\nif sys.version_info < (3, 7):\n # became part of stdlib in python 3.7\n requirements.append(\"dataclasses\")\n\n# We do not want to install tensorflow in the readthedocs environment, where we\n# use autodoc_mock_imports instead. 
Hence we use this flag to decide whether or\n# not to append tensorflow and tensorflow_probability to the requirements:\nif os.environ.get(\"READTHEDOCS\") != \"True\":\n requirements.extend(\n [\n # tensorflow>=2.3 not compatible with tensorflow-probability<0.11\n \"tensorflow>=2.1.0,<2.3\",\n # tensorflow-probability==0.10.0 doesn't install correctly\n # https://github.com/tensorflow/probability/issues/991\n #\n # gpflow uses private functionality not present in tensorflow-probability~=0.11\n \"tensorflow-probability>=0.9,<0.11,!=0.10.0\",\n \"setuptools>=41.0.0\", # to satisfy dependency constraints\n ]\n )\n\n\ndef read_file(filename):\n with open(filename, encoding=\"utf-8\") as f:\n return f.read().strip()\n\n\nversion = read_file(\"VERSION\")\nreadme_text = read_file(\"README.md\")\n\npackages = find_packages(\".\", exclude=[\"tests\"])\n\nsetup(\n name=\"gpflow\",\n version=version,\n author=\"James Hensman, Alex Matthews\",\n author_email=\"[email protected]\",\n description=\"Gaussian process methods in TensorFlow\",\n long_description=readme_text,\n long_description_content_type=\"text/markdown\",\n license=\"Apache License 2.0\",\n keywords=\"machine-learning gaussian-processes kernels tensorflow\",\n url=\"https://www.gpflow.org\",\n project_urls={\n \"Source on GitHub\": \"https://github.com/GPflow/GPflow\",\n \"Documentation\": \"https://gpflow.readthedocs.io\",\n },\n packages=packages,\n include_package_data=True,\n install_requires=requirements,\n extras_require={\"ImageToTensorBoard\": [\"matplotlib\"]},\n python_requires=\">=3.6\",\n classifiers=[\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "setup.py"}]} | 1,014 | 417 |
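The issue in this record asks for a `pip check -vvv` stage on CI, while the golden diff only tightens the version pins. A minimal sketch of such a gate is below; the wrapper script and its name are assumptions for illustration — only the `pip check -vvv` invocation itself comes from the issue text, and a CI job could just run that command directly.

```python
#!/usr/bin/env python
"""Fail the build when installed packages have conflicting requirements.

Hypothetical CI helper (e.g. check_deps.py): it simply shells out to
`pip check -vvv` and turns a non-zero exit into a clear build failure.
"""
import subprocess
import sys

result = subprocess.run(
    [sys.executable, "-m", "pip", "check", "-vvv"],
    capture_output=True,
    text=True,
)
print(result.stdout)
if result.returncode != 0:
    print(result.stderr, file=sys.stderr)
    sys.exit("Dependency conflict detected; see `pip check` output above.")
```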
gh_patches_debug_30178 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-965 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ku-ring-gai Council doesn't work if there is a house number 1A
works - 1A Latona Street PYMBLE 2073
doesn't work - 1 Latona Street PYMBLE 2073
Both exist
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py`
Content:
```
1 import datetime
2 import json
3 import requests
4
5 from bs4 import BeautifulSoup
6 from requests.utils import requote_uri
7 from waste_collection_schedule import Collection
8
9 TITLE = "Ku-ring-gai Council"
10 DESCRIPTION = "Source for Ku-ring-gai Council waste collection."
11 URL = "https://www.krg.nsw.gov.au"
12 TEST_CASES = {
13 "randomHouse": {
14 "post_code": "2070",
15 "suburb": "LINDFIELD",
16 "street_name": "Wolseley Road",
17 "street_number": "42",
18 },
19 "randomAppartment": {
20 "post_code": "2074",
21 "suburb": "WARRAWEE",
22 "street_name": "Cherry Street",
23 "street_number": "4/9",
24 },
25 "randomMultiunit": {
26 "post_code": "2075",
27 "suburb": "ST IVES",
28 "street_name": "Kitchener Street",
29 "street_number": "99/2-8",
30 },
31 }
32
33 API_URLS = {
34 "session":"https://www.krg.nsw.gov.au" ,
35 "search": "https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}",
36 "schedule": "https://www.krg.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU",
37 }
38
39 HEADERS = {
40 "user-agent": "Mozilla/5.0",
41 }
42
43 ICON_MAP = {
44 "GeneralWaste": "mdi:trash-can",
45 "Recycling": "mdi:recycle",
46 "GreenWaste": "mdi:leaf",
47 }
48
49 ROUNDS = {
50 "GeneralWaste": "General Waste",
51 "Recycling": "Recycling",
52 "GreenWaste": "Green Waste",
53 }
54
55 # _LOGGER = logging.getLogger(__name__)
56
57
58 class Source:
59 def __init__(
60 self, post_code: str, suburb: str, street_name: str, street_number: str
61 ):
62 self.post_code = post_code
63 self.suburb = suburb.upper()
64 self.street_name = street_name
65 self.street_number = street_number
66
67 def fetch(self):
68
69 locationId = 0
70
71 # 'collection' api call seems to require an ASP.Net_sessionID, so obtain the relevant cookie
72 s = requests.Session()
73 q = requote_uri(str(API_URLS["session"]))
74 r0 = s.get(q, headers = HEADERS)
75
76 # Do initial address search
77 address = "{} {}, {} NSW {}".format(self.street_number, self.street_name, self.suburb, self.post_code)
78 q = requote_uri(str(API_URLS["search"]).format(address))
79 r1 = s.get(q, headers = HEADERS)
80 data = json.loads(r1.text)
81
82 # Find the geolocation for the address
83 for item in data["Items"]:
84 if address in item['AddressSingleLine']:
85 locationId = item["Id"]
86 break
87
88 if locationId == 0:
89 return []
90
91 # Retrieve the upcoming collections for location
92 q = requote_uri(str(API_URLS["schedule"]).format(locationId))
93 r2 = s.get(q, headers = HEADERS)
94 data = json.loads(r2.text)
95 responseContent = data["responseContent"]
96
97 soup = BeautifulSoup(responseContent, "html.parser")
98 services = soup.find_all("article")
99
100 entries = []
101
102 for item in services:
103 waste_type = item.find('h3').text
104 date = datetime.datetime.strptime(item.find('div', {'class': 'next-service'}).text.strip(), "%a %d/%m/%Y").date()
105 entries.append(
106 Collection(
107 date = date,
108 # t=waste_type, # api returns GeneralWaste, Recycling, GreenWaste
109 t = ROUNDS.get(waste_type), # returns user-friendly General Waste, Recycling, Green Waste
110 icon=ICON_MAP.get(waste_type),
111 )
112 )
113
114 return entries
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py
@@ -28,8 +28,21 @@
"street_name": "Kitchener Street",
"street_number": "99/2-8",
},
+ "1 Latona St": {
+ "post_code": "2073",
+ "suburb": "PYMBLE",
+ "street_name": "Latona Street",
+ "street_number": "1",
+ },
+ "1A Latona St": {
+ "post_code": "2073",
+ "suburb": "PYMBLE",
+ "street_name": "Latona Street",
+ "street_number": "1A",
+ },
}
+
API_URLS = {
"session":"https://www.krg.nsw.gov.au" ,
"search": "https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}",
@@ -77,16 +90,12 @@
address = "{} {}, {} NSW {}".format(self.street_number, self.street_name, self.suburb, self.post_code)
q = requote_uri(str(API_URLS["search"]).format(address))
r1 = s.get(q, headers = HEADERS)
- data = json.loads(r1.text)
+ data = json.loads(r1.text)["Items"]
# Find the geolocation for the address
- for item in data["Items"]:
+ for item in data:
if address in item['AddressSingleLine']:
locationId = item["Id"]
- break
-
- if locationId == 0:
- return []
# Retrieve the upcoming collections for location
q = requote_uri(str(API_URLS["schedule"]).format(locationId))
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py\n@@ -28,8 +28,21 @@\n \"street_name\": \"Kitchener Street\",\n \"street_number\": \"99/2-8\",\n },\n+ \"1 Latona St\": {\n+ \"post_code\": \"2073\",\n+ \"suburb\": \"PYMBLE\",\n+ \"street_name\": \"Latona Street\",\n+ \"street_number\": \"1\",\n+ },\n+ \"1A Latona St\": {\n+ \"post_code\": \"2073\",\n+ \"suburb\": \"PYMBLE\",\n+ \"street_name\": \"Latona Street\",\n+ \"street_number\": \"1A\",\n+ },\n }\n \n+\n API_URLS = {\n \"session\":\"https://www.krg.nsw.gov.au\" ,\n \"search\": \"https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}\",\n@@ -77,16 +90,12 @@\n address = \"{} {}, {} NSW {}\".format(self.street_number, self.street_name, self.suburb, self.post_code)\n q = requote_uri(str(API_URLS[\"search\"]).format(address))\n r1 = s.get(q, headers = HEADERS)\n- data = json.loads(r1.text)\n+ data = json.loads(r1.text)[\"Items\"]\n \n # Find the geolocation for the address\n- for item in data[\"Items\"]:\n+ for item in data:\n if address in item['AddressSingleLine']:\n locationId = item[\"Id\"]\n- break\n-\n- if locationId == 0:\n- return []\n \n # Retrieve the upcoming collections for location\n q = requote_uri(str(API_URLS[\"schedule\"]).format(locationId))\n", "issue": "Ku-ring-gai Council doesn't work if there is a house number 1A\nworks - 1A Latona Street PYMBLE 2073\r\ndoesn't work - 1 Latona Street PYMBLE 2073\r\n\r\nBoth exist\n", "before_files": [{"content": "import datetime\nimport json\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom requests.utils import requote_uri\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Ku-ring-gai Council\"\nDESCRIPTION = \"Source for Ku-ring-gai Council waste collection.\"\nURL = \"https://www.krg.nsw.gov.au\"\nTEST_CASES = {\n \"randomHouse\": {\n \"post_code\": \"2070\",\n \"suburb\": \"LINDFIELD\",\n \"street_name\": \"Wolseley Road\",\n \"street_number\": \"42\",\n },\n \"randomAppartment\": {\n \"post_code\": \"2074\",\n \"suburb\": \"WARRAWEE\",\n \"street_name\": \"Cherry Street\",\n \"street_number\": \"4/9\",\n },\n \"randomMultiunit\": {\n \"post_code\": \"2075\",\n \"suburb\": \"ST IVES\",\n \"street_name\": \"Kitchener Street\",\n \"street_number\": \"99/2-8\",\n },\n}\n\nAPI_URLS = {\n \"session\":\"https://www.krg.nsw.gov.au\" ,\n \"search\": \"https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}\",\n \"schedule\": \"https://www.krg.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU\",\n}\n\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\n\nICON_MAP = {\n \"GeneralWaste\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"GreenWaste\": \"mdi:leaf\",\n}\n\nROUNDS = {\n \"GeneralWaste\": \"General Waste\",\n \"Recycling\": \"Recycling\",\n \"GreenWaste\": \"Green Waste\",\n}\n\n# _LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(\n self, post_code: str, suburb: str, street_name: str, street_number: str\n ):\n self.post_code = post_code\n self.suburb = suburb.upper()\n self.street_name = street_name\n self.street_number = street_number\n\n def fetch(self):\n\n locationId = 0\n\n # 'collection' api call 
seems to require an ASP.Net_sessionID, so obtain the relevant cookie\n s = requests.Session()\n q = requote_uri(str(API_URLS[\"session\"]))\n r0 = s.get(q, headers = HEADERS)\n\n # Do initial address search\n address = \"{} {}, {} NSW {}\".format(self.street_number, self.street_name, self.suburb, self.post_code)\n q = requote_uri(str(API_URLS[\"search\"]).format(address))\n r1 = s.get(q, headers = HEADERS)\n data = json.loads(r1.text)\n\n # Find the geolocation for the address\n for item in data[\"Items\"]:\n if address in item['AddressSingleLine']:\n locationId = item[\"Id\"]\n break\n\n if locationId == 0:\n return []\n\n # Retrieve the upcoming collections for location\n q = requote_uri(str(API_URLS[\"schedule\"]).format(locationId))\n r2 = s.get(q, headers = HEADERS)\n data = json.loads(r2.text)\n responseContent = data[\"responseContent\"]\n\n soup = BeautifulSoup(responseContent, \"html.parser\")\n services = soup.find_all(\"article\")\n \n entries = []\n\n for item in services:\n waste_type = item.find('h3').text\n date = datetime.datetime.strptime(item.find('div', {'class': 'next-service'}).text.strip(), \"%a %d/%m/%Y\").date()\n entries.append(\n Collection(\n date = date,\n # t=waste_type, # api returns GeneralWaste, Recycling, GreenWaste \n t = ROUNDS.get(waste_type), # returns user-friendly General Waste, Recycling, Green Waste\n icon=ICON_MAP.get(waste_type),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py"}], "after_files": [{"content": "import datetime\nimport json\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom requests.utils import requote_uri\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Ku-ring-gai Council\"\nDESCRIPTION = \"Source for Ku-ring-gai Council waste collection.\"\nURL = \"https://www.krg.nsw.gov.au\"\nTEST_CASES = {\n \"randomHouse\": {\n \"post_code\": \"2070\",\n \"suburb\": \"LINDFIELD\",\n \"street_name\": \"Wolseley Road\",\n \"street_number\": \"42\",\n },\n \"randomAppartment\": {\n \"post_code\": \"2074\",\n \"suburb\": \"WARRAWEE\",\n \"street_name\": \"Cherry Street\",\n \"street_number\": \"4/9\",\n },\n \"randomMultiunit\": {\n \"post_code\": \"2075\",\n \"suburb\": \"ST IVES\",\n \"street_name\": \"Kitchener Street\",\n \"street_number\": \"99/2-8\",\n },\n \"1 Latona St\": {\n \"post_code\": \"2073\",\n \"suburb\": \"PYMBLE\",\n \"street_name\": \"Latona Street\",\n \"street_number\": \"1\",\n },\n \"1A Latona St\": {\n \"post_code\": \"2073\",\n \"suburb\": \"PYMBLE\",\n \"street_name\": \"Latona Street\",\n \"street_number\": \"1A\",\n },\n}\n\n\nAPI_URLS = {\n \"session\":\"https://www.krg.nsw.gov.au\" ,\n \"search\": \"https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}\",\n \"schedule\": \"https://www.krg.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU\",\n}\n\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\n\nICON_MAP = {\n \"GeneralWaste\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"GreenWaste\": \"mdi:leaf\",\n}\n\nROUNDS = {\n \"GeneralWaste\": \"General Waste\",\n \"Recycling\": \"Recycling\",\n \"GreenWaste\": \"Green Waste\",\n}\n\n# _LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(\n self, post_code: str, suburb: str, street_name: str, street_number: str\n ):\n self.post_code = post_code\n self.suburb = suburb.upper()\n self.street_name = street_name\n self.street_number = street_number\n\n def fetch(self):\n\n locationId = 0\n\n # 
'collection' api call seems to require an ASP.Net_sessionID, so obtain the relevant cookie\n s = requests.Session()\n q = requote_uri(str(API_URLS[\"session\"]))\n r0 = s.get(q, headers = HEADERS)\n\n # Do initial address search\n address = \"{} {}, {} NSW {}\".format(self.street_number, self.street_name, self.suburb, self.post_code)\n q = requote_uri(str(API_URLS[\"search\"]).format(address))\n r1 = s.get(q, headers = HEADERS)\n data = json.loads(r1.text)[\"Items\"]\n\n # Find the geolocation for the address\n for item in data:\n if address in item['AddressSingleLine']:\n locationId = item[\"Id\"]\n\n # Retrieve the upcoming collections for location\n q = requote_uri(str(API_URLS[\"schedule\"]).format(locationId))\n r2 = s.get(q, headers = HEADERS)\n data = json.loads(r2.text)\n responseContent = data[\"responseContent\"]\n\n soup = BeautifulSoup(responseContent, \"html.parser\")\n services = soup.find_all(\"article\")\n \n entries = []\n\n for item in services:\n waste_type = item.find('h3').text\n date = datetime.datetime.strptime(item.find('div', {'class': 'next-service'}).text.strip(), \"%a %d/%m/%Y\").date()\n entries.append(\n Collection(\n date = date,\n # t=waste_type, # api returns GeneralWaste, Recycling, GreenWaste \n t = ROUNDS.get(waste_type), # returns user-friendly General Waste, Recycling, Green Waste\n icon=ICON_MAP.get(waste_type),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py"}]} | 1,452 | 478 |
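The bug in this record stems from matching candidates with a substring test (`address in item['AddressSingleLine']`), which is fragile for near-duplicate house numbers such as "1" vs "1A". The upstream patch simply drops the early `break`; purely as an illustration of a stricter alternative (an assumption, not what the source does), the hypothetical helper below normalizes both strings and requires the candidate to begin with the full requested address, so "1 Latona Street" can never select "1A Latona Street".

```python
def pick_location_id(items, wanted_address):
    """Return the Id whose AddressSingleLine matches the requested address.

    Hypothetical alternative to substring matching: collapse whitespace and
    case on both sides, then require the candidate to start with the whole
    requested address string.
    """
    wanted = " ".join(wanted_address.upper().split())
    for item in items:
        candidate = " ".join(item["AddressSingleLine"].upper().split())
        if candidate.startswith(wanted):
            return item["Id"]
    return 0


# '1 LATONA STREET ...' does not prefix-match '1A LATONA STREET ...', so the
# exact house number wins even when both properties exist.
```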
gh_patches_debug_8227 | rasdani/github-patches | git_diff | spacetelescope__jwql-84 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make dev conda environment more general
We should make our `dev` `conda` environment more generalized so that it can be used on the new test server.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.4.0'
6
7 AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'
8 AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
9
10 REQUIRES = ['astropy', 'astroquery', 'bokeh', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
11
12 setup(
13 name = 'jwql',
14 version = VERSION,
15 description = 'The JWST Quicklook Project',
16 url = 'https://github.com/spacetelescope/jwql.git',
17 author = AUTHORS,
18 author_email='[email protected]',
19 license='BSD',
20 keywords = ['astronomy', 'python'],
21 classifiers = ['Programming Language :: Python'],
22 packages = find_packages(),
23 install_requires = REQUIRES,
24 include_package_data=True,
25 include_dirs = [np.get_include()],
26 )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'
AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
-REQUIRES = ['astropy', 'astroquery', 'bokeh', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
+REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
setup(
name = 'jwql',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,7 @@\n AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\n AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n \n-REQUIRES = ['astropy', 'astroquery', 'bokeh', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n+REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n \n setup(\n name = 'jwql',\n", "issue": "Make dev conda environment more general\nWe should make our `dev` `conda` environment more generalized so that it can be used on the new test server. \n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.4.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\nAUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = ['astropy', 'astroquery', 'bokeh', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n\nsetup(\n name = 'jwql',\n version = VERSION,\n description = 'The JWST Quicklook Project',\n url = 'https://github.com/spacetelescope/jwql.git',\n author = AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords = ['astronomy', 'python'],\n classifiers = ['Programming Language :: Python'],\n packages = find_packages(),\n install_requires = REQUIRES,\n include_package_data=True,\n include_dirs = [np.get_include()],\n )\n", "path": "setup.py"}], "after_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.4.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\nAUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n\nsetup(\n name = 'jwql',\n version = VERSION,\n description = 'The JWST Quicklook Project',\n url = 'https://github.com/spacetelescope/jwql.git',\n author = AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords = ['astronomy', 'python'],\n classifiers = ['Programming Language :: Python'],\n packages = find_packages(),\n install_requires = REQUIRES,\n include_package_data=True,\n include_dirs = [np.get_include()],\n )\n", "path": "setup.py"}]} | 571 | 189 |
gh_patches_debug_43497 | rasdani/github-patches | git_diff | ray-project__ray-4114 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[rllib] make rollout script support multiagent
Hi,
If I'm correct, only single agent/policy is currently supported in rollout.py. For instance https://github.com/ray-project/ray/blob/2e30f7ba386e716bf80f019dcd473b67d83abb95/python/ray/rllib/rollout.py#L109-L110 references default policy to check if policy uses lstm, which fails when a multi agent configuration is loaded.
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/rllib/rollout.py`
Content:
```
1 #!/usr/bin/env python
2
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6
7 import argparse
8 import json
9 import os
10 import pickle
11
12 import gym
13 import ray
14 from ray.rllib.agents.registry import get_agent_class
15
16 EXAMPLE_USAGE = """
17 Example Usage via RLlib CLI:
18 rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
19 --env CartPole-v0 --steps 1000000 --out rollouts.pkl
20
21 Example Usage via executable:
22 ./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
23 --env CartPole-v0 --steps 1000000 --out rollouts.pkl
24 """
25
26 # Note: if you use any custom models or envs, register them here first, e.g.:
27 #
28 # ModelCatalog.register_custom_model("pa_model", ParametricActionsModel)
29 # register_env("pa_cartpole", lambda _: ParametricActionCartpole(10))
30
31
32 def create_parser(parser_creator=None):
33 parser_creator = parser_creator or argparse.ArgumentParser
34 parser = parser_creator(
35 formatter_class=argparse.RawDescriptionHelpFormatter,
36 description="Roll out a reinforcement learning agent "
37 "given a checkpoint.",
38 epilog=EXAMPLE_USAGE)
39
40 parser.add_argument(
41 "checkpoint", type=str, help="Checkpoint from which to roll out.")
42 required_named = parser.add_argument_group("required named arguments")
43 required_named.add_argument(
44 "--run",
45 type=str,
46 required=True,
47 help="The algorithm or model to train. This may refer to the name "
48 "of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
49 "user-defined trainable function or class registered in the "
50 "tune registry.")
51 required_named.add_argument(
52 "--env", type=str, help="The gym environment to use.")
53 parser.add_argument(
54 "--no-render",
55 default=False,
56 action="store_const",
57 const=True,
58 help="Surpress rendering of the environment.")
59 parser.add_argument(
60 "--steps", default=10000, help="Number of steps to roll out.")
61 parser.add_argument("--out", default=None, help="Output filename.")
62 parser.add_argument(
63 "--config",
64 default="{}",
65 type=json.loads,
66 help="Algorithm-specific configuration (e.g. env, hyperparams). "
67 "Surpresses loading of configuration from checkpoint.")
68 return parser
69
70
71 def run(args, parser):
72 config = args.config
73 if not config:
74 # Load configuration from file
75 config_dir = os.path.dirname(args.checkpoint)
76 config_path = os.path.join(config_dir, "params.json")
77 if not os.path.exists(config_path):
78 config_path = os.path.join(config_dir, "../params.json")
79 if not os.path.exists(config_path):
80 raise ValueError(
81 "Could not find params.json in either the checkpoint dir or "
82 "its parent directory.")
83 with open(config_path) as f:
84 config = json.load(f)
85 if "num_workers" in config:
86 config["num_workers"] = min(2, config["num_workers"])
87
88 if not args.env:
89 if not config.get("env"):
90 parser.error("the following arguments are required: --env")
91 args.env = config.get("env")
92
93 ray.init()
94
95 cls = get_agent_class(args.run)
96 agent = cls(env=args.env, config=config)
97 agent.restore(args.checkpoint)
98 num_steps = int(args.steps)
99 rollout(agent, args.env, num_steps, args.out, args.no_render)
100
101
102 def rollout(agent, env_name, num_steps, out=None, no_render=True):
103 if hasattr(agent, "local_evaluator"):
104 env = agent.local_evaluator.env
105 else:
106 env = gym.make(env_name)
107
108 if hasattr(agent, "local_evaluator"):
109 state_init = agent.local_evaluator.policy_map[
110 "default"].get_initial_state()
111 else:
112 state_init = []
113 if state_init:
114 use_lstm = True
115 else:
116 use_lstm = False
117
118 if out is not None:
119 rollouts = []
120 steps = 0
121 while steps < (num_steps or steps + 1):
122 if out is not None:
123 rollout = []
124 state = env.reset()
125 done = False
126 reward_total = 0.0
127 while not done and steps < (num_steps or steps + 1):
128 if use_lstm:
129 action, state_init, logits = agent.compute_action(
130 state, state=state_init)
131 else:
132 action = agent.compute_action(state)
133 next_state, reward, done, _ = env.step(action)
134 reward_total += reward
135 if not no_render:
136 env.render()
137 if out is not None:
138 rollout.append([state, action, next_state, reward, done])
139 steps += 1
140 state = next_state
141 if out is not None:
142 rollouts.append(rollout)
143 print("Episode reward", reward_total)
144 if out is not None:
145 pickle.dump(rollouts, open(out, "wb"))
146
147
148 if __name__ == "__main__":
149 parser = create_parser()
150 args = parser.parse_args()
151 run(args, parser)
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/rllib/rollout.py b/python/ray/rllib/rollout.py
--- a/python/ray/rllib/rollout.py
+++ b/python/ray/rllib/rollout.py
@@ -73,15 +73,15 @@
if not config:
# Load configuration from file
config_dir = os.path.dirname(args.checkpoint)
- config_path = os.path.join(config_dir, "params.json")
+ config_path = os.path.join(config_dir, "params.pkl")
if not os.path.exists(config_path):
- config_path = os.path.join(config_dir, "../params.json")
+ config_path = os.path.join(config_dir, "../params.pkl")
if not os.path.exists(config_path):
raise ValueError(
- "Could not find params.json in either the checkpoint dir or "
+ "Could not find params.pkl in either the checkpoint dir or "
"its parent directory.")
- with open(config_path) as f:
- config = json.load(f)
+ with open(config_path, 'rb') as f:
+ config = pickle.load(f)
if "num_workers" in config:
config["num_workers"] = min(2, config["num_workers"])
@@ -102,18 +102,18 @@
def rollout(agent, env_name, num_steps, out=None, no_render=True):
if hasattr(agent, "local_evaluator"):
env = agent.local_evaluator.env
+ multiagent = agent.local_evaluator.multiagent
+ if multiagent:
+ policy_agent_mapping = agent.config["multiagent"][
+ "policy_mapping_fn"]
+ mapping_cache = {}
+ policy_map = agent.local_evaluator.policy_map
+ state_init = {p: m.get_initial_state() for p, m in policy_map.items()}
+ use_lstm = {p: len(s) > 0 for p, s in state_init.items()}
else:
env = gym.make(env_name)
-
- if hasattr(agent, "local_evaluator"):
- state_init = agent.local_evaluator.policy_map[
- "default"].get_initial_state()
- else:
- state_init = []
- if state_init:
- use_lstm = True
- else:
- use_lstm = False
+ multiagent = False
+ use_lstm = {'default': False}
if out is not None:
rollouts = []
@@ -125,13 +125,39 @@
done = False
reward_total = 0.0
while not done and steps < (num_steps or steps + 1):
- if use_lstm:
- action, state_init, logits = agent.compute_action(
- state, state=state_init)
+ if multiagent:
+ action_dict = {}
+ for agent_id in state.keys():
+ a_state = state[agent_id]
+ if a_state is not None:
+ policy_id = mapping_cache.setdefault(
+ agent_id, policy_agent_mapping(agent_id))
+ p_use_lstm = use_lstm[policy_id]
+ if p_use_lstm:
+ a_action, p_state_init, _ = agent.compute_action(
+ a_state,
+ state=state_init[policy_id],
+ policy_id=policy_id)
+ state_init[policy_id] = p_state_init
+ else:
+ a_action = agent.compute_action(
+ a_state, policy_id=policy_id)
+ action_dict[agent_id] = a_action
+ action = action_dict
else:
- action = agent.compute_action(state)
+ if use_lstm["default"]:
+ action, state_init, _ = agent.compute_action(
+ state, state=state_init)
+ else:
+ action = agent.compute_action(state)
+
next_state, reward, done, _ = env.step(action)
- reward_total += reward
+
+ if multiagent:
+ done = done["__all__"]
+ reward_total += sum(reward.values())
+ else:
+ reward_total += reward
if not no_render:
env.render()
if out is not None:
@@ -141,6 +167,7 @@
if out is not None:
rollouts.append(rollout)
print("Episode reward", reward_total)
+
if out is not None:
pickle.dump(rollouts, open(out, "wb"))
| {"golden_diff": "diff --git a/python/ray/rllib/rollout.py b/python/ray/rllib/rollout.py\n--- a/python/ray/rllib/rollout.py\n+++ b/python/ray/rllib/rollout.py\n@@ -73,15 +73,15 @@\n if not config:\n # Load configuration from file\n config_dir = os.path.dirname(args.checkpoint)\n- config_path = os.path.join(config_dir, \"params.json\")\n+ config_path = os.path.join(config_dir, \"params.pkl\")\n if not os.path.exists(config_path):\n- config_path = os.path.join(config_dir, \"../params.json\")\n+ config_path = os.path.join(config_dir, \"../params.pkl\")\n if not os.path.exists(config_path):\n raise ValueError(\n- \"Could not find params.json in either the checkpoint dir or \"\n+ \"Could not find params.pkl in either the checkpoint dir or \"\n \"its parent directory.\")\n- with open(config_path) as f:\n- config = json.load(f)\n+ with open(config_path, 'rb') as f:\n+ config = pickle.load(f)\n if \"num_workers\" in config:\n config[\"num_workers\"] = min(2, config[\"num_workers\"])\n \n@@ -102,18 +102,18 @@\n def rollout(agent, env_name, num_steps, out=None, no_render=True):\n if hasattr(agent, \"local_evaluator\"):\n env = agent.local_evaluator.env\n+ multiagent = agent.local_evaluator.multiagent\n+ if multiagent:\n+ policy_agent_mapping = agent.config[\"multiagent\"][\n+ \"policy_mapping_fn\"]\n+ mapping_cache = {}\n+ policy_map = agent.local_evaluator.policy_map\n+ state_init = {p: m.get_initial_state() for p, m in policy_map.items()}\n+ use_lstm = {p: len(s) > 0 for p, s in state_init.items()}\n else:\n env = gym.make(env_name)\n-\n- if hasattr(agent, \"local_evaluator\"):\n- state_init = agent.local_evaluator.policy_map[\n- \"default\"].get_initial_state()\n- else:\n- state_init = []\n- if state_init:\n- use_lstm = True\n- else:\n- use_lstm = False\n+ multiagent = False\n+ use_lstm = {'default': False}\n \n if out is not None:\n rollouts = []\n@@ -125,13 +125,39 @@\n done = False\n reward_total = 0.0\n while not done and steps < (num_steps or steps + 1):\n- if use_lstm:\n- action, state_init, logits = agent.compute_action(\n- state, state=state_init)\n+ if multiagent:\n+ action_dict = {}\n+ for agent_id in state.keys():\n+ a_state = state[agent_id]\n+ if a_state is not None:\n+ policy_id = mapping_cache.setdefault(\n+ agent_id, policy_agent_mapping(agent_id))\n+ p_use_lstm = use_lstm[policy_id]\n+ if p_use_lstm:\n+ a_action, p_state_init, _ = agent.compute_action(\n+ a_state,\n+ state=state_init[policy_id],\n+ policy_id=policy_id)\n+ state_init[policy_id] = p_state_init\n+ else:\n+ a_action = agent.compute_action(\n+ a_state, policy_id=policy_id)\n+ action_dict[agent_id] = a_action\n+ action = action_dict\n else:\n- action = agent.compute_action(state)\n+ if use_lstm[\"default\"]:\n+ action, state_init, _ = agent.compute_action(\n+ state, state=state_init)\n+ else:\n+ action = agent.compute_action(state)\n+\n next_state, reward, done, _ = env.step(action)\n- reward_total += reward\n+\n+ if multiagent:\n+ done = done[\"__all__\"]\n+ reward_total += sum(reward.values())\n+ else:\n+ reward_total += reward\n if not no_render:\n env.render()\n if out is not None:\n@@ -141,6 +167,7 @@\n if out is not None:\n rollouts.append(rollout)\n print(\"Episode reward\", reward_total)\n+\n if out is not None:\n pickle.dump(rollouts, open(out, \"wb\"))\n", "issue": "[rllib] make rollout script support multiagent \nHi,\r\n\r\nIf I'm correct, only single agent/policy is currently supported in rollout.py. 
For instance https://github.com/ray-project/ray/blob/2e30f7ba386e716bf80f019dcd473b67d83abb95/python/ray/rllib/rollout.py#L109-L110 references default policy to check if policy uses lstm, which fails when a multi agent configuration is loaded.\r\n\r\nThanks!\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport pickle\n\nimport gym\nimport ray\nfrom ray.rllib.agents.registry import get_agent_class\n\nEXAMPLE_USAGE = \"\"\"\nExample Usage via RLlib CLI:\n rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN\n --env CartPole-v0 --steps 1000000 --out rollouts.pkl\n\nExample Usage via executable:\n ./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN\n --env CartPole-v0 --steps 1000000 --out rollouts.pkl\n\"\"\"\n\n# Note: if you use any custom models or envs, register them here first, e.g.:\n#\n# ModelCatalog.register_custom_model(\"pa_model\", ParametricActionsModel)\n# register_env(\"pa_cartpole\", lambda _: ParametricActionCartpole(10))\n\n\ndef create_parser(parser_creator=None):\n parser_creator = parser_creator or argparse.ArgumentParser\n parser = parser_creator(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Roll out a reinforcement learning agent \"\n \"given a checkpoint.\",\n epilog=EXAMPLE_USAGE)\n\n parser.add_argument(\n \"checkpoint\", type=str, help=\"Checkpoint from which to roll out.\")\n required_named = parser.add_argument_group(\"required named arguments\")\n required_named.add_argument(\n \"--run\",\n type=str,\n required=True,\n help=\"The algorithm or model to train. This may refer to the name \"\n \"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a \"\n \"user-defined trainable function or class registered in the \"\n \"tune registry.\")\n required_named.add_argument(\n \"--env\", type=str, help=\"The gym environment to use.\")\n parser.add_argument(\n \"--no-render\",\n default=False,\n action=\"store_const\",\n const=True,\n help=\"Surpress rendering of the environment.\")\n parser.add_argument(\n \"--steps\", default=10000, help=\"Number of steps to roll out.\")\n parser.add_argument(\"--out\", default=None, help=\"Output filename.\")\n parser.add_argument(\n \"--config\",\n default=\"{}\",\n type=json.loads,\n help=\"Algorithm-specific configuration (e.g. env, hyperparams). 
\"\n \"Surpresses loading of configuration from checkpoint.\")\n return parser\n\n\ndef run(args, parser):\n config = args.config\n if not config:\n # Load configuration from file\n config_dir = os.path.dirname(args.checkpoint)\n config_path = os.path.join(config_dir, \"params.json\")\n if not os.path.exists(config_path):\n config_path = os.path.join(config_dir, \"../params.json\")\n if not os.path.exists(config_path):\n raise ValueError(\n \"Could not find params.json in either the checkpoint dir or \"\n \"its parent directory.\")\n with open(config_path) as f:\n config = json.load(f)\n if \"num_workers\" in config:\n config[\"num_workers\"] = min(2, config[\"num_workers\"])\n\n if not args.env:\n if not config.get(\"env\"):\n parser.error(\"the following arguments are required: --env\")\n args.env = config.get(\"env\")\n\n ray.init()\n\n cls = get_agent_class(args.run)\n agent = cls(env=args.env, config=config)\n agent.restore(args.checkpoint)\n num_steps = int(args.steps)\n rollout(agent, args.env, num_steps, args.out, args.no_render)\n\n\ndef rollout(agent, env_name, num_steps, out=None, no_render=True):\n if hasattr(agent, \"local_evaluator\"):\n env = agent.local_evaluator.env\n else:\n env = gym.make(env_name)\n\n if hasattr(agent, \"local_evaluator\"):\n state_init = agent.local_evaluator.policy_map[\n \"default\"].get_initial_state()\n else:\n state_init = []\n if state_init:\n use_lstm = True\n else:\n use_lstm = False\n\n if out is not None:\n rollouts = []\n steps = 0\n while steps < (num_steps or steps + 1):\n if out is not None:\n rollout = []\n state = env.reset()\n done = False\n reward_total = 0.0\n while not done and steps < (num_steps or steps + 1):\n if use_lstm:\n action, state_init, logits = agent.compute_action(\n state, state=state_init)\n else:\n action = agent.compute_action(state)\n next_state, reward, done, _ = env.step(action)\n reward_total += reward\n if not no_render:\n env.render()\n if out is not None:\n rollout.append([state, action, next_state, reward, done])\n steps += 1\n state = next_state\n if out is not None:\n rollouts.append(rollout)\n print(\"Episode reward\", reward_total)\n if out is not None:\n pickle.dump(rollouts, open(out, \"wb\"))\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n run(args, parser)\n", "path": "python/ray/rllib/rollout.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport pickle\n\nimport gym\nimport ray\nfrom ray.rllib.agents.registry import get_agent_class\n\nEXAMPLE_USAGE = \"\"\"\nExample Usage via RLlib CLI:\n rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN\n --env CartPole-v0 --steps 1000000 --out rollouts.pkl\n\nExample Usage via executable:\n ./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN\n --env CartPole-v0 --steps 1000000 --out rollouts.pkl\n\"\"\"\n\n# Note: if you use any custom models or envs, register them here first, e.g.:\n#\n# ModelCatalog.register_custom_model(\"pa_model\", ParametricActionsModel)\n# register_env(\"pa_cartpole\", lambda _: ParametricActionCartpole(10))\n\n\ndef create_parser(parser_creator=None):\n parser_creator = parser_creator or argparse.ArgumentParser\n parser = parser_creator(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Roll out a reinforcement learning agent \"\n \"given a checkpoint.\",\n epilog=EXAMPLE_USAGE)\n\n 
parser.add_argument(\n \"checkpoint\", type=str, help=\"Checkpoint from which to roll out.\")\n required_named = parser.add_argument_group(\"required named arguments\")\n required_named.add_argument(\n \"--run\",\n type=str,\n required=True,\n help=\"The algorithm or model to train. This may refer to the name \"\n \"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a \"\n \"user-defined trainable function or class registered in the \"\n \"tune registry.\")\n required_named.add_argument(\n \"--env\", type=str, help=\"The gym environment to use.\")\n parser.add_argument(\n \"--no-render\",\n default=False,\n action=\"store_const\",\n const=True,\n help=\"Surpress rendering of the environment.\")\n parser.add_argument(\n \"--steps\", default=10000, help=\"Number of steps to roll out.\")\n parser.add_argument(\"--out\", default=None, help=\"Output filename.\")\n parser.add_argument(\n \"--config\",\n default=\"{}\",\n type=json.loads,\n help=\"Algorithm-specific configuration (e.g. env, hyperparams). \"\n \"Surpresses loading of configuration from checkpoint.\")\n return parser\n\n\ndef run(args, parser):\n config = args.config\n if not config:\n # Load configuration from file\n config_dir = os.path.dirname(args.checkpoint)\n config_path = os.path.join(config_dir, \"params.pkl\")\n if not os.path.exists(config_path):\n config_path = os.path.join(config_dir, \"../params.pkl\")\n if not os.path.exists(config_path):\n raise ValueError(\n \"Could not find params.pkl in either the checkpoint dir or \"\n \"its parent directory.\")\n with open(config_path, 'rb') as f:\n config = pickle.load(f)\n if \"num_workers\" in config:\n config[\"num_workers\"] = min(2, config[\"num_workers\"])\n\n if not args.env:\n if not config.get(\"env\"):\n parser.error(\"the following arguments are required: --env\")\n args.env = config.get(\"env\")\n\n ray.init()\n\n cls = get_agent_class(args.run)\n agent = cls(env=args.env, config=config)\n agent.restore(args.checkpoint)\n num_steps = int(args.steps)\n rollout(agent, args.env, num_steps, args.out, args.no_render)\n\n\ndef rollout(agent, env_name, num_steps, out=None, no_render=True):\n if hasattr(agent, \"local_evaluator\"):\n env = agent.local_evaluator.env\n multiagent = agent.local_evaluator.multiagent\n if multiagent:\n policy_agent_mapping = agent.config[\"multiagent\"][\n \"policy_mapping_fn\"]\n mapping_cache = {}\n policy_map = agent.local_evaluator.policy_map\n state_init = {p: m.get_initial_state() for p, m in policy_map.items()}\n use_lstm = {p: len(s) > 0 for p, s in state_init.items()}\n else:\n env = gym.make(env_name)\n multiagent = False\n use_lstm = {'default': False}\n\n if out is not None:\n rollouts = []\n steps = 0\n while steps < (num_steps or steps + 1):\n if out is not None:\n rollout = []\n state = env.reset()\n done = False\n reward_total = 0.0\n while not done and steps < (num_steps or steps + 1):\n if multiagent:\n action_dict = {}\n for agent_id in state.keys():\n a_state = state[agent_id]\n if a_state is not None:\n policy_id = mapping_cache.setdefault(\n agent_id, policy_agent_mapping(agent_id))\n p_use_lstm = use_lstm[policy_id]\n if p_use_lstm:\n a_action, p_state_init, _ = agent.compute_action(\n a_state,\n state=state_init[policy_id],\n policy_id=policy_id)\n state_init[policy_id] = p_state_init\n else:\n a_action = agent.compute_action(\n a_state, policy_id=policy_id)\n action_dict[agent_id] = a_action\n action = action_dict\n else:\n if use_lstm[\"default\"]:\n action, state_init, _ = agent.compute_action(\n state, 
state=state_init)\n else:\n action = agent.compute_action(state)\n\n next_state, reward, done, _ = env.step(action)\n\n if multiagent:\n done = done[\"__all__\"]\n reward_total += sum(reward.values())\n else:\n reward_total += reward\n if not no_render:\n env.render()\n if out is not None:\n rollout.append([state, action, next_state, reward, done])\n steps += 1\n state = next_state\n if out is not None:\n rollouts.append(rollout)\n print(\"Episode reward\", reward_total)\n\n if out is not None:\n pickle.dump(rollouts, open(out, \"wb\"))\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n run(args, parser)\n", "path": "python/ray/rllib/rollout.py"}]} | 1,876 | 967 |
gh_patches_debug_18727 | rasdani/github-patches | git_diff | scrapy__scrapy-2847 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redirect 308 missing
I did a check on the RedirectMiddleware and noticed that code 308 is missing. Is there a reason for that?
Some websites don't update their sitemap and have a long list of 308 from http to https.
(side note: is there a way to add "s" before a link is scraped?)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/downloadermiddlewares/redirect.py`
Content:
```
1 import logging
2 from six.moves.urllib.parse import urljoin
3
4 from w3lib.url import safe_url_string
5
6 from scrapy.http import HtmlResponse
7 from scrapy.utils.response import get_meta_refresh
8 from scrapy.exceptions import IgnoreRequest, NotConfigured
9
10 logger = logging.getLogger(__name__)
11
12
13 class BaseRedirectMiddleware(object):
14
15 enabled_setting = 'REDIRECT_ENABLED'
16
17 def __init__(self, settings):
18 if not settings.getbool(self.enabled_setting):
19 raise NotConfigured
20
21 self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
22 self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')
23
24 @classmethod
25 def from_crawler(cls, crawler):
26 return cls(crawler.settings)
27
28 def _redirect(self, redirected, request, spider, reason):
29 ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
30 redirects = request.meta.get('redirect_times', 0) + 1
31
32 if ttl and redirects <= self.max_redirect_times:
33 redirected.meta['redirect_times'] = redirects
34 redirected.meta['redirect_ttl'] = ttl - 1
35 redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
36 [request.url]
37 redirected.dont_filter = request.dont_filter
38 redirected.priority = request.priority + self.priority_adjust
39 logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
40 {'reason': reason, 'redirected': redirected, 'request': request},
41 extra={'spider': spider})
42 return redirected
43 else:
44 logger.debug("Discarding %(request)s: max redirections reached",
45 {'request': request}, extra={'spider': spider})
46 raise IgnoreRequest("max redirections reached")
47
48 def _redirect_request_using_get(self, request, redirect_url):
49 redirected = request.replace(url=redirect_url, method='GET', body='')
50 redirected.headers.pop('Content-Type', None)
51 redirected.headers.pop('Content-Length', None)
52 return redirected
53
54
55 class RedirectMiddleware(BaseRedirectMiddleware):
56 """
57 Handle redirection of requests based on response status
58 and meta-refresh html tag.
59 """
60 def process_response(self, request, response, spider):
61 if (request.meta.get('dont_redirect', False) or
62 response.status in getattr(spider, 'handle_httpstatus_list', []) or
63 response.status in request.meta.get('handle_httpstatus_list', []) or
64 request.meta.get('handle_httpstatus_all', False)):
65 return response
66
67 allowed_status = (301, 302, 303, 307)
68 if 'Location' not in response.headers or response.status not in allowed_status:
69 return response
70
71 location = safe_url_string(response.headers['location'])
72
73 redirected_url = urljoin(request.url, location)
74
75 if response.status in (301, 307) or request.method == 'HEAD':
76 redirected = request.replace(url=redirected_url)
77 return self._redirect(redirected, request, spider, response.status)
78
79 redirected = self._redirect_request_using_get(request, redirected_url)
80 return self._redirect(redirected, request, spider, response.status)
81
82
83 class MetaRefreshMiddleware(BaseRedirectMiddleware):
84
85 enabled_setting = 'METAREFRESH_ENABLED'
86
87 def __init__(self, settings):
88 super(MetaRefreshMiddleware, self).__init__(settings)
89 self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
90 settings.getint('METAREFRESH_MAXDELAY'))
91
92 def process_response(self, request, response, spider):
93 if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \
94 not isinstance(response, HtmlResponse):
95 return response
96
97 interval, url = get_meta_refresh(response)
98 if url and interval < self._maxdelay:
99 redirected = self._redirect_request_using_get(request, url)
100 return self._redirect(redirected, request, spider, 'meta refresh')
101
102 return response
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/downloadermiddlewares/redirect.py b/scrapy/downloadermiddlewares/redirect.py
--- a/scrapy/downloadermiddlewares/redirect.py
+++ b/scrapy/downloadermiddlewares/redirect.py
@@ -64,7 +64,7 @@
request.meta.get('handle_httpstatus_all', False)):
return response
- allowed_status = (301, 302, 303, 307)
+ allowed_status = (301, 302, 303, 307, 308)
if 'Location' not in response.headers or response.status not in allowed_status:
return response
@@ -72,7 +72,7 @@
redirected_url = urljoin(request.url, location)
- if response.status in (301, 307) or request.method == 'HEAD':
+ if response.status in (301, 307, 308) or request.method == 'HEAD':
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
| {"golden_diff": "diff --git a/scrapy/downloadermiddlewares/redirect.py b/scrapy/downloadermiddlewares/redirect.py\n--- a/scrapy/downloadermiddlewares/redirect.py\n+++ b/scrapy/downloadermiddlewares/redirect.py\n@@ -64,7 +64,7 @@\n request.meta.get('handle_httpstatus_all', False)):\n return response\n \n- allowed_status = (301, 302, 303, 307)\n+ allowed_status = (301, 302, 303, 307, 308)\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n \n@@ -72,7 +72,7 @@\n \n redirected_url = urljoin(request.url, location)\n \n- if response.status in (301, 307) or request.method == 'HEAD':\n+ if response.status in (301, 307, 308) or request.method == 'HEAD':\n redirected = request.replace(url=redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n", "issue": "Redirect 308 missing\nI did a check on the RedirectMiddleware and noticed that code 308 is missing. Is there a reason for that?\r\nSome websites don't update their sitemap and have a long list of 308 from http to https.\r\n\r\n(side note: is there a way to add \"s\" before a link is scraped?)\n", "before_files": [{"content": "import logging\nfrom six.moves.urllib.parse import urljoin\n\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.http import HtmlResponse\nfrom scrapy.utils.response import get_meta_refresh\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseRedirectMiddleware(object):\n\n enabled_setting = 'REDIRECT_ENABLED'\n\n def __init__(self, settings):\n if not settings.getbool(self.enabled_setting):\n raise NotConfigured\n\n self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')\n self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def _redirect(self, redirected, request, spider, reason):\n ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)\n redirects = request.meta.get('redirect_times', 0) + 1\n\n if ttl and redirects <= self.max_redirect_times:\n redirected.meta['redirect_times'] = redirects\n redirected.meta['redirect_ttl'] = ttl - 1\n redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \\\n [request.url]\n redirected.dont_filter = request.dont_filter\n redirected.priority = request.priority + self.priority_adjust\n logger.debug(\"Redirecting (%(reason)s) to %(redirected)s from %(request)s\",\n {'reason': reason, 'redirected': redirected, 'request': request},\n extra={'spider': spider})\n return redirected\n else:\n logger.debug(\"Discarding %(request)s: max redirections reached\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest(\"max redirections reached\")\n\n def _redirect_request_using_get(self, request, redirect_url):\n redirected = request.replace(url=redirect_url, method='GET', body='')\n redirected.headers.pop('Content-Type', None)\n redirected.headers.pop('Content-Length', None)\n return redirected\n\n\nclass RedirectMiddleware(BaseRedirectMiddleware):\n \"\"\"\n Handle redirection of requests based on response status\n and meta-refresh html tag.\n \"\"\"\n def process_response(self, request, response, spider):\n if (request.meta.get('dont_redirect', False) or\n response.status in getattr(spider, 'handle_httpstatus_list', []) or\n response.status in request.meta.get('handle_httpstatus_list', []) or\n request.meta.get('handle_httpstatus_all', False)):\n return response\n\n allowed_status = (301, 302, 
303, 307)\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n\n location = safe_url_string(response.headers['location'])\n\n redirected_url = urljoin(request.url, location)\n\n if response.status in (301, 307) or request.method == 'HEAD':\n redirected = request.replace(url=redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n redirected = self._redirect_request_using_get(request, redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n\nclass MetaRefreshMiddleware(BaseRedirectMiddleware):\n\n enabled_setting = 'METAREFRESH_ENABLED'\n\n def __init__(self, settings):\n super(MetaRefreshMiddleware, self).__init__(settings)\n self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',\n settings.getint('METAREFRESH_MAXDELAY'))\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \\\n not isinstance(response, HtmlResponse):\n return response\n\n interval, url = get_meta_refresh(response)\n if url and interval < self._maxdelay:\n redirected = self._redirect_request_using_get(request, url)\n return self._redirect(redirected, request, spider, 'meta refresh')\n\n return response\n", "path": "scrapy/downloadermiddlewares/redirect.py"}], "after_files": [{"content": "import logging\nfrom six.moves.urllib.parse import urljoin\n\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.http import HtmlResponse\nfrom scrapy.utils.response import get_meta_refresh\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseRedirectMiddleware(object):\n\n enabled_setting = 'REDIRECT_ENABLED'\n\n def __init__(self, settings):\n if not settings.getbool(self.enabled_setting):\n raise NotConfigured\n\n self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')\n self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def _redirect(self, redirected, request, spider, reason):\n ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)\n redirects = request.meta.get('redirect_times', 0) + 1\n\n if ttl and redirects <= self.max_redirect_times:\n redirected.meta['redirect_times'] = redirects\n redirected.meta['redirect_ttl'] = ttl - 1\n redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \\\n [request.url]\n redirected.dont_filter = request.dont_filter\n redirected.priority = request.priority + self.priority_adjust\n logger.debug(\"Redirecting (%(reason)s) to %(redirected)s from %(request)s\",\n {'reason': reason, 'redirected': redirected, 'request': request},\n extra={'spider': spider})\n return redirected\n else:\n logger.debug(\"Discarding %(request)s: max redirections reached\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest(\"max redirections reached\")\n\n def _redirect_request_using_get(self, request, redirect_url):\n redirected = request.replace(url=redirect_url, method='GET', body='')\n redirected.headers.pop('Content-Type', None)\n redirected.headers.pop('Content-Length', None)\n return redirected\n\n\nclass RedirectMiddleware(BaseRedirectMiddleware):\n \"\"\"\n Handle redirection of requests based on response status\n and meta-refresh html tag.\n \"\"\"\n def process_response(self, request, response, spider):\n if (request.meta.get('dont_redirect', False) or\n response.status in 
getattr(spider, 'handle_httpstatus_list', []) or\n response.status in request.meta.get('handle_httpstatus_list', []) or\n request.meta.get('handle_httpstatus_all', False)):\n return response\n\n allowed_status = (301, 302, 303, 307, 308)\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n\n location = safe_url_string(response.headers['location'])\n\n redirected_url = urljoin(request.url, location)\n\n if response.status in (301, 307, 308) or request.method == 'HEAD':\n redirected = request.replace(url=redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n redirected = self._redirect_request_using_get(request, redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n\nclass MetaRefreshMiddleware(BaseRedirectMiddleware):\n\n enabled_setting = 'METAREFRESH_ENABLED'\n\n def __init__(self, settings):\n super(MetaRefreshMiddleware, self).__init__(settings)\n self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',\n settings.getint('METAREFRESH_MAXDELAY'))\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \\\n not isinstance(response, HtmlResponse):\n return response\n\n interval, url = get_meta_refresh(response)\n if url and interval < self._maxdelay:\n redirected = self._redirect_request_using_get(request, url)\n return self._redirect(redirected, request, spider, 'meta refresh')\n\n return response\n", "path": "scrapy/downloadermiddlewares/redirect.py"}]} | 1,400 | 251 |
gh_patches_debug_11217 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1879 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: WasteNet Southland not working after 1.46.0
### I Have A Problem With:
A specific source
### What's Your Problem
The WasteNet Southland website and URL have changed about a month ago. The issue created by this change was supposed to be fixed in 1.46.0, but unfortunately it is still not working.
Tested with my address and even with the example data, returning all sensors as unknown.
### Source (if relevant)
wastenet_org_nz
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
_No response_
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py`
Content:
```
1 import re
2 from datetime import datetime
3 from html.parser import HTMLParser
4
5 import requests
6 from waste_collection_schedule import Collection # type: ignore[attr-defined]
7
8 TITLE = "Gore, Invercargill & Southland"
9 DESCRIPTION = "Source for Wastenet.org.nz."
10 URL = "http://www.wastenet.org.nz"
11 TEST_CASES = {
12 "166 Lewis Street": {"address": "166 Lewis Street"},
13 "Old Format: 199 Crawford Street": {"address": "199 Crawford Street INVERCARGILL"},
14 "Old Format: 156 Tay Street": {"address": "156 Tay Street INVERCARGILL"},
15 "entry_id glass only": {"entry_id": "23571"},
16 # "31 Conyers Street": {"address": "31 Conyers Street INVERCARGILL"}, # Thursday
17 # "67 Chesney Street": {"address": "67 Chesney Street INVERCARGILL"}, # Friday
18 }
19
20 ICON_MAP = {
21 "Glass": "mdi:glass-mug-variant",
22 "Rubbish": "mdi:delete-empty",
23 "Recycle": "mdi:recycle",
24 }
25
26
27 class WasteSearchResultsParser(HTMLParser):
28 def __init__(self):
29 super().__init__()
30 self._entries = []
31 self._wasteType = None
32 self._withinCollectionDay = False
33 self._withinType = False
34
35 @property
36 def entries(self):
37 return self._entries
38
39 def handle_starttag(self, tag, attrs):
40 if tag == "span":
41 d = dict(attrs)
42 if d.get("class", "").startswith("badge"):
43 self._withinType = True
44
45 def handle_data(self, data):
46 if self._withinType:
47 self._withinType = False
48 self._wasteType = data
49 elif data.startswith("Next Service Date:"):
50 self._withinCollectionDay = True
51 elif self._withinCollectionDay:
52 date = datetime.strptime(data, "%y/%m/%d").date()
53 if self._wasteType is not None:
54 self._entries.append(Collection(date, self._wasteType))
55 self._withinCollectionDay = False
56
57
58 HEADER = {"User-Agent": "Mozilla/5.0"}
59
60 SITE_URL = "https://www.wastenet.org.nz/bin-day/"
61 ADDRESS_URL = "https://www.wastenet.org.nz/wp-admin/admin-ajax.php"
62
63
64 class Source:
65 def __init__(self, address: str | None = None, entry_id=None):
66 if not address and not entry_id:
67 raise ValueError("Address or entry_id must be provided")
68
69 self._address = address.replace(" INVERCARGILL", "") if address else None
70 self._entry_id = entry_id
71
72 def get_entry_id(self, s):
73 r = s.get(SITE_URL)
74 r.raise_for_status()
75 # regex find security: 'KEY'
76 match = re.search(r"security: '(\w+)'", r.text)
77 if not match:
78 raise ValueError("Security key not found")
79 security_key = match.group(1)
80
81 # get token
82 params = {
83 "action": "we_data_autocomplete",
84 "term": self._address,
85 "security": security_key,
86 }
87
88 r = s.get(
89 ADDRESS_URL,
90 params=params,
91 )
92 r.raise_for_status()
93
94 return r.json()["data"][0]["url"].split("=")[1]
95
96 def fetch(self):
97 s = requests.Session()
98 s.headers.update(HEADER)
99
100 if self._entry_id is None:
101 self._entry_id = self.get_entry_id(s)
102
103 r = s.get(SITE_URL, params={"entry_id": self._entry_id})
104 r.raise_for_status()
105 p = WasteSearchResultsParser()
106 p.feed(r.text)
107 return p.entries
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py
@@ -49,7 +49,7 @@
elif data.startswith("Next Service Date:"):
self._withinCollectionDay = True
elif self._withinCollectionDay:
- date = datetime.strptime(data, "%y/%m/%d").date()
+ date = datetime.strptime(data, "%d/%m/%y").date()
if self._wasteType is not None:
self._entries.append(Collection(date, self._wasteType))
self._withinCollectionDay = False
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py\n@@ -49,7 +49,7 @@\n elif data.startswith(\"Next Service Date:\"):\n self._withinCollectionDay = True\n elif self._withinCollectionDay:\n- date = datetime.strptime(data, \"%y/%m/%d\").date()\n+ date = datetime.strptime(data, \"%d/%m/%y\").date()\n if self._wasteType is not None:\n self._entries.append(Collection(date, self._wasteType))\n self._withinCollectionDay = False\n", "issue": "[Bug]: WasteNet Southland not working after 1.46.0\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe WasteNet Southland website and url has changed about a month ago. The issue created by this change was supposed to be fixed in 1.46.0, but unfortunately it is still not working.\r\nTested with my address and even with the example data, returning all sensors as unknown.\n\n### Source (if relevant)\n\nwastenet_org_nz\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n_No response_\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import re\nfrom datetime import datetime\nfrom html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Gore, Invercargill & Southland\"\nDESCRIPTION = \"Source for Wastenet.org.nz.\"\nURL = \"http://www.wastenet.org.nz\"\nTEST_CASES = {\n \"166 Lewis Street\": {\"address\": \"166 Lewis Street\"},\n \"Old Format: 199 Crawford Street\": {\"address\": \"199 Crawford Street INVERCARGILL\"},\n \"Old Format: 156 Tay Street\": {\"address\": \"156 Tay Street INVERCARGILL\"},\n \"entry_id glass only\": {\"entry_id\": \"23571\"},\n # \"31 Conyers Street\": {\"address\": \"31 Conyers Street INVERCARGILL\"}, # Thursday\n # \"67 Chesney Street\": {\"address\": \"67 Chesney Street INVERCARGILL\"}, # Friday\n}\n\nICON_MAP = {\n \"Glass\": \"mdi:glass-mug-variant\",\n \"Rubbish\": \"mdi:delete-empty\",\n \"Recycle\": \"mdi:recycle\",\n}\n\n\nclass WasteSearchResultsParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self._entries = []\n self._wasteType = None\n self._withinCollectionDay = False\n self._withinType = False\n\n @property\n def entries(self):\n return self._entries\n\n def 
handle_starttag(self, tag, attrs):\n if tag == \"span\":\n d = dict(attrs)\n if d.get(\"class\", \"\").startswith(\"badge\"):\n self._withinType = True\n\n def handle_data(self, data):\n if self._withinType:\n self._withinType = False\n self._wasteType = data\n elif data.startswith(\"Next Service Date:\"):\n self._withinCollectionDay = True\n elif self._withinCollectionDay:\n date = datetime.strptime(data, \"%y/%m/%d\").date()\n if self._wasteType is not None:\n self._entries.append(Collection(date, self._wasteType))\n self._withinCollectionDay = False\n\n\nHEADER = {\"User-Agent\": \"Mozilla/5.0\"}\n\nSITE_URL = \"https://www.wastenet.org.nz/bin-day/\"\nADDRESS_URL = \"https://www.wastenet.org.nz/wp-admin/admin-ajax.php\"\n\n\nclass Source:\n def __init__(self, address: str | None = None, entry_id=None):\n if not address and not entry_id:\n raise ValueError(\"Address or entry_id must be provided\")\n\n self._address = address.replace(\" INVERCARGILL\", \"\") if address else None\n self._entry_id = entry_id\n\n def get_entry_id(self, s):\n r = s.get(SITE_URL)\n r.raise_for_status()\n # regex find security: 'KEY'\n match = re.search(r\"security: '(\\w+)'\", r.text)\n if not match:\n raise ValueError(\"Security key not found\")\n security_key = match.group(1)\n\n # get token\n params = {\n \"action\": \"we_data_autocomplete\",\n \"term\": self._address,\n \"security\": security_key,\n }\n\n r = s.get(\n ADDRESS_URL,\n params=params,\n )\n r.raise_for_status()\n\n return r.json()[\"data\"][0][\"url\"].split(\"=\")[1]\n\n def fetch(self):\n s = requests.Session()\n s.headers.update(HEADER)\n\n if self._entry_id is None:\n self._entry_id = self.get_entry_id(s)\n\n r = s.get(SITE_URL, params={\"entry_id\": self._entry_id})\n r.raise_for_status()\n p = WasteSearchResultsParser()\n p.feed(r.text)\n return p.entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py"}], "after_files": [{"content": "import re\nfrom datetime import datetime\nfrom html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Gore, Invercargill & Southland\"\nDESCRIPTION = \"Source for Wastenet.org.nz.\"\nURL = \"http://www.wastenet.org.nz\"\nTEST_CASES = {\n \"166 Lewis Street\": {\"address\": \"166 Lewis Street\"},\n \"Old Format: 199 Crawford Street\": {\"address\": \"199 Crawford Street INVERCARGILL\"},\n \"Old Format: 156 Tay Street\": {\"address\": \"156 Tay Street INVERCARGILL\"},\n \"entry_id glass only\": {\"entry_id\": \"23571\"},\n # \"31 Conyers Street\": {\"address\": \"31 Conyers Street INVERCARGILL\"}, # Thursday\n # \"67 Chesney Street\": {\"address\": \"67 Chesney Street INVERCARGILL\"}, # Friday\n}\n\nICON_MAP = {\n \"Glass\": \"mdi:glass-mug-variant\",\n \"Rubbish\": \"mdi:delete-empty\",\n \"Recycle\": \"mdi:recycle\",\n}\n\n\nclass WasteSearchResultsParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self._entries = []\n self._wasteType = None\n self._withinCollectionDay = False\n self._withinType = False\n\n @property\n def entries(self):\n return self._entries\n\n def handle_starttag(self, tag, attrs):\n if tag == \"span\":\n d = dict(attrs)\n if d.get(\"class\", \"\").startswith(\"badge\"):\n self._withinType = True\n\n def handle_data(self, data):\n if self._withinType:\n self._withinType = False\n self._wasteType = data\n elif data.startswith(\"Next Service Date:\"):\n self._withinCollectionDay = True\n elif self._withinCollectionDay:\n date = 
datetime.strptime(data, \"%d/%m/%y\").date()\n if self._wasteType is not None:\n self._entries.append(Collection(date, self._wasteType))\n self._withinCollectionDay = False\n\n\nHEADER = {\"User-Agent\": \"Mozilla/5.0\"}\n\nSITE_URL = \"https://www.wastenet.org.nz/bin-day/\"\nADDRESS_URL = \"https://www.wastenet.org.nz/wp-admin/admin-ajax.php\"\n\n\nclass Source:\n def __init__(self, address: str | None = None, entry_id=None):\n if not address and not entry_id:\n raise ValueError(\"Address or entry_id must be provided\")\n\n self._address = address.replace(\" INVERCARGILL\", \"\") if address else None\n self._entry_id = entry_id\n\n def get_entry_id(self, s):\n r = s.get(SITE_URL)\n r.raise_for_status()\n # regex find security: 'KEY'\n match = re.search(r\"security: '(\\w+)'\", r.text)\n if not match:\n raise ValueError(\"Security key not found\")\n security_key = match.group(1)\n\n # get token\n params = {\n \"action\": \"we_data_autocomplete\",\n \"term\": self._address,\n \"security\": security_key,\n }\n\n r = s.get(\n ADDRESS_URL,\n params=params,\n )\n r.raise_for_status()\n\n return r.json()[\"data\"][0][\"url\"].split(\"=\")[1]\n\n def fetch(self):\n s = requests.Session()\n s.headers.update(HEADER)\n\n if self._entry_id is None:\n self._entry_id = self.get_entry_id(s)\n\n r = s.get(SITE_URL, params={\"entry_id\": self._entry_id})\n r.raise_for_status()\n p = WasteSearchResultsParser()\n p.feed(r.text)\n return p.entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py"}]} | 1,678 | 191 |
gh_patches_debug_44041 | rasdani/github-patches | git_diff | pypi__warehouse-11122 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add caveats to macaroons for expiration (time) and version
**What's the problem this feature will solve?**
This will allow further attenuating the permissions granted by an API key
**Describe the solution you'd like**
Addition of two additional types of caveat: project version (for uploads) and time (expiry).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/macaroons/caveats.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import json
14
15 import pymacaroons
16
17 from warehouse.packaging.models import Project
18
19
20 class InvalidMacaroonError(Exception):
21 ...
22
23
24 class Caveat:
25 def __init__(self, verifier):
26 self.verifier = verifier
27
28 def verify(self, predicate):
29 raise InvalidMacaroonError
30
31 def __call__(self, predicate):
32 return self.verify(predicate)
33
34
35 class V1Caveat(Caveat):
36 def verify_projects(self, projects):
37 # First, ensure that we're actually operating in
38 # the context of a package.
39 if not isinstance(self.verifier.context, Project):
40 raise InvalidMacaroonError(
41 "project-scoped token used outside of a project context"
42 )
43
44 project = self.verifier.context
45 if project.normalized_name in projects:
46 return True
47
48 raise InvalidMacaroonError(
49 f"project-scoped token is not valid for project '{project.name}'"
50 )
51
52 def verify(self, predicate):
53 try:
54 data = json.loads(predicate)
55 except ValueError:
56 raise InvalidMacaroonError("malformatted predicate")
57
58 if data.get("version") != 1:
59 raise InvalidMacaroonError("invalidate version in predicate")
60
61 permissions = data.get("permissions")
62 if permissions is None:
63 raise InvalidMacaroonError("invalid permissions in predicate")
64
65 if permissions == "user":
66 # User-scoped tokens behave exactly like a user's normal credentials.
67 return True
68
69 projects = permissions.get("projects")
70 if projects is None:
71 raise InvalidMacaroonError("invalid projects in predicate")
72
73 return self.verify_projects(projects)
74
75
76 class Verifier:
77 def __init__(self, macaroon, context, principals, permission):
78 self.macaroon = macaroon
79 self.context = context
80 self.principals = principals
81 self.permission = permission
82 self.verifier = pymacaroons.Verifier()
83
84 def verify(self, key):
85 self.verifier.satisfy_general(V1Caveat(self))
86
87 try:
88 return self.verifier.verify(self.macaroon, key)
89 except (
90 pymacaroons.exceptions.MacaroonInvalidSignatureException,
91 Exception, # https://github.com/ecordell/pymacaroons/issues/50
92 ):
93 raise InvalidMacaroonError("invalid macaroon signature")
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/macaroons/caveats.py b/warehouse/macaroons/caveats.py
--- a/warehouse/macaroons/caveats.py
+++ b/warehouse/macaroons/caveats.py
@@ -11,6 +11,7 @@
# limitations under the License.
import json
+import time
import pymacaroons
@@ -24,43 +25,51 @@
class Caveat:
def __init__(self, verifier):
self.verifier = verifier
+ # TODO: Surface this failure reason to the user.
+ # See: https://github.com/pypa/warehouse/issues/9018
+ self.failure_reason = None
- def verify(self, predicate):
- raise InvalidMacaroonError
+ def verify(self, predicate) -> bool:
+ return False
def __call__(self, predicate):
return self.verify(predicate)
class V1Caveat(Caveat):
- def verify_projects(self, projects):
+ def verify_projects(self, projects) -> bool:
# First, ensure that we're actually operating in
# the context of a package.
if not isinstance(self.verifier.context, Project):
- raise InvalidMacaroonError(
+ self.failure_reason = (
"project-scoped token used outside of a project context"
)
+ return False
project = self.verifier.context
if project.normalized_name in projects:
return True
- raise InvalidMacaroonError(
+ self.failure_reason = (
f"project-scoped token is not valid for project '{project.name}'"
)
+ return False
- def verify(self, predicate):
+ def verify(self, predicate) -> bool:
try:
data = json.loads(predicate)
except ValueError:
- raise InvalidMacaroonError("malformatted predicate")
+ self.failure_reason = "malformatted predicate"
+ return False
if data.get("version") != 1:
- raise InvalidMacaroonError("invalidate version in predicate")
+ self.failure_reason = "invalid version in predicate"
+ return False
permissions = data.get("permissions")
if permissions is None:
- raise InvalidMacaroonError("invalid permissions in predicate")
+ self.failure_reason = "invalid permissions in predicate"
+ return False
if permissions == "user":
# User-scoped tokens behave exactly like a user's normal credentials.
@@ -68,11 +77,34 @@
projects = permissions.get("projects")
if projects is None:
- raise InvalidMacaroonError("invalid projects in predicate")
+ self.failure_reason = "invalid projects in predicate"
+ return False
return self.verify_projects(projects)
+class ExpiryCaveat(Caveat):
+ def verify(self, predicate):
+ try:
+ data = json.loads(predicate)
+ expiry = data["exp"]
+ not_before = data["nbf"]
+ except (KeyError, ValueError, TypeError):
+ self.failure_reason = "malformatted predicate"
+ return False
+
+ if not expiry or not not_before:
+ self.failure_reason = "missing fields"
+ return False
+
+ now = int(time.time())
+ if now < not_before or now >= expiry:
+ self.failure_reason = "token is expired"
+ return False
+
+ return True
+
+
class Verifier:
def __init__(self, macaroon, context, principals, permission):
self.macaroon = macaroon
@@ -83,6 +115,7 @@
def verify(self, key):
self.verifier.satisfy_general(V1Caveat(self))
+ self.verifier.satisfy_general(ExpiryCaveat(self))
try:
return self.verifier.verify(self.macaroon, key)
@@ -90,4 +123,4 @@
pymacaroons.exceptions.MacaroonInvalidSignatureException,
Exception, # https://github.com/ecordell/pymacaroons/issues/50
):
- raise InvalidMacaroonError("invalid macaroon signature")
+ return False
| {"golden_diff": "diff --git a/warehouse/macaroons/caveats.py b/warehouse/macaroons/caveats.py\n--- a/warehouse/macaroons/caveats.py\n+++ b/warehouse/macaroons/caveats.py\n@@ -11,6 +11,7 @@\n # limitations under the License.\n \n import json\n+import time\n \n import pymacaroons\n \n@@ -24,43 +25,51 @@\n class Caveat:\n def __init__(self, verifier):\n self.verifier = verifier\n+ # TODO: Surface this failure reason to the user.\n+ # See: https://github.com/pypa/warehouse/issues/9018\n+ self.failure_reason = None\n \n- def verify(self, predicate):\n- raise InvalidMacaroonError\n+ def verify(self, predicate) -> bool:\n+ return False\n \n def __call__(self, predicate):\n return self.verify(predicate)\n \n \n class V1Caveat(Caveat):\n- def verify_projects(self, projects):\n+ def verify_projects(self, projects) -> bool:\n # First, ensure that we're actually operating in\n # the context of a package.\n if not isinstance(self.verifier.context, Project):\n- raise InvalidMacaroonError(\n+ self.failure_reason = (\n \"project-scoped token used outside of a project context\"\n )\n+ return False\n \n project = self.verifier.context\n if project.normalized_name in projects:\n return True\n \n- raise InvalidMacaroonError(\n+ self.failure_reason = (\n f\"project-scoped token is not valid for project '{project.name}'\"\n )\n+ return False\n \n- def verify(self, predicate):\n+ def verify(self, predicate) -> bool:\n try:\n data = json.loads(predicate)\n except ValueError:\n- raise InvalidMacaroonError(\"malformatted predicate\")\n+ self.failure_reason = \"malformatted predicate\"\n+ return False\n \n if data.get(\"version\") != 1:\n- raise InvalidMacaroonError(\"invalidate version in predicate\")\n+ self.failure_reason = \"invalid version in predicate\"\n+ return False\n \n permissions = data.get(\"permissions\")\n if permissions is None:\n- raise InvalidMacaroonError(\"invalid permissions in predicate\")\n+ self.failure_reason = \"invalid permissions in predicate\"\n+ return False\n \n if permissions == \"user\":\n # User-scoped tokens behave exactly like a user's normal credentials.\n@@ -68,11 +77,34 @@\n \n projects = permissions.get(\"projects\")\n if projects is None:\n- raise InvalidMacaroonError(\"invalid projects in predicate\")\n+ self.failure_reason = \"invalid projects in predicate\"\n+ return False\n \n return self.verify_projects(projects)\n \n \n+class ExpiryCaveat(Caveat):\n+ def verify(self, predicate):\n+ try:\n+ data = json.loads(predicate)\n+ expiry = data[\"exp\"]\n+ not_before = data[\"nbf\"]\n+ except (KeyError, ValueError, TypeError):\n+ self.failure_reason = \"malformatted predicate\"\n+ return False\n+\n+ if not expiry or not not_before:\n+ self.failure_reason = \"missing fields\"\n+ return False\n+\n+ now = int(time.time())\n+ if now < not_before or now >= expiry:\n+ self.failure_reason = \"token is expired\"\n+ return False\n+\n+ return True\n+\n+\n class Verifier:\n def __init__(self, macaroon, context, principals, permission):\n self.macaroon = macaroon\n@@ -83,6 +115,7 @@\n \n def verify(self, key):\n self.verifier.satisfy_general(V1Caveat(self))\n+ self.verifier.satisfy_general(ExpiryCaveat(self))\n \n try:\n return self.verifier.verify(self.macaroon, key)\n@@ -90,4 +123,4 @@\n pymacaroons.exceptions.MacaroonInvalidSignatureException,\n Exception, # https://github.com/ecordell/pymacaroons/issues/50\n ):\n- raise InvalidMacaroonError(\"invalid macaroon signature\")\n+ return False\n", "issue": "Add caveats to macaroons for expiration (time) and version\n**What's the problem this 
feature will solve?**\r\n\r\nThis will allow further attenuating the permissions granted by an API key\r\n\r\n**Describe the solution you'd like**\r\n\r\nAddition of two addition types of caveat, project version (for uploads) and time (expiry).\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport pymacaroons\n\nfrom warehouse.packaging.models import Project\n\n\nclass InvalidMacaroonError(Exception):\n ...\n\n\nclass Caveat:\n def __init__(self, verifier):\n self.verifier = verifier\n\n def verify(self, predicate):\n raise InvalidMacaroonError\n\n def __call__(self, predicate):\n return self.verify(predicate)\n\n\nclass V1Caveat(Caveat):\n def verify_projects(self, projects):\n # First, ensure that we're actually operating in\n # the context of a package.\n if not isinstance(self.verifier.context, Project):\n raise InvalidMacaroonError(\n \"project-scoped token used outside of a project context\"\n )\n\n project = self.verifier.context\n if project.normalized_name in projects:\n return True\n\n raise InvalidMacaroonError(\n f\"project-scoped token is not valid for project '{project.name}'\"\n )\n\n def verify(self, predicate):\n try:\n data = json.loads(predicate)\n except ValueError:\n raise InvalidMacaroonError(\"malformatted predicate\")\n\n if data.get(\"version\") != 1:\n raise InvalidMacaroonError(\"invalidate version in predicate\")\n\n permissions = data.get(\"permissions\")\n if permissions is None:\n raise InvalidMacaroonError(\"invalid permissions in predicate\")\n\n if permissions == \"user\":\n # User-scoped tokens behave exactly like a user's normal credentials.\n return True\n\n projects = permissions.get(\"projects\")\n if projects is None:\n raise InvalidMacaroonError(\"invalid projects in predicate\")\n\n return self.verify_projects(projects)\n\n\nclass Verifier:\n def __init__(self, macaroon, context, principals, permission):\n self.macaroon = macaroon\n self.context = context\n self.principals = principals\n self.permission = permission\n self.verifier = pymacaroons.Verifier()\n\n def verify(self, key):\n self.verifier.satisfy_general(V1Caveat(self))\n\n try:\n return self.verifier.verify(self.macaroon, key)\n except (\n pymacaroons.exceptions.MacaroonInvalidSignatureException,\n Exception, # https://github.com/ecordell/pymacaroons/issues/50\n ):\n raise InvalidMacaroonError(\"invalid macaroon signature\")\n", "path": "warehouse/macaroons/caveats.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nimport json\nimport time\n\nimport pymacaroons\n\nfrom warehouse.packaging.models import Project\n\n\nclass InvalidMacaroonError(Exception):\n ...\n\n\nclass Caveat:\n def __init__(self, verifier):\n self.verifier = verifier\n # TODO: Surface this failure reason to the user.\n # See: https://github.com/pypa/warehouse/issues/9018\n self.failure_reason = None\n\n def verify(self, predicate) -> bool:\n return False\n\n def __call__(self, predicate):\n return self.verify(predicate)\n\n\nclass V1Caveat(Caveat):\n def verify_projects(self, projects) -> bool:\n # First, ensure that we're actually operating in\n # the context of a package.\n if not isinstance(self.verifier.context, Project):\n self.failure_reason = (\n \"project-scoped token used outside of a project context\"\n )\n return False\n\n project = self.verifier.context\n if project.normalized_name in projects:\n return True\n\n self.failure_reason = (\n f\"project-scoped token is not valid for project '{project.name}'\"\n )\n return False\n\n def verify(self, predicate) -> bool:\n try:\n data = json.loads(predicate)\n except ValueError:\n self.failure_reason = \"malformatted predicate\"\n return False\n\n if data.get(\"version\") != 1:\n self.failure_reason = \"invalid version in predicate\"\n return False\n\n permissions = data.get(\"permissions\")\n if permissions is None:\n self.failure_reason = \"invalid permissions in predicate\"\n return False\n\n if permissions == \"user\":\n # User-scoped tokens behave exactly like a user's normal credentials.\n return True\n\n projects = permissions.get(\"projects\")\n if projects is None:\n self.failure_reason = \"invalid projects in predicate\"\n return False\n\n return self.verify_projects(projects)\n\n\nclass ExpiryCaveat(Caveat):\n def verify(self, predicate):\n try:\n data = json.loads(predicate)\n expiry = data[\"exp\"]\n not_before = data[\"nbf\"]\n except (KeyError, ValueError, TypeError):\n self.failure_reason = \"malformatted predicate\"\n return False\n\n if not expiry or not not_before:\n self.failure_reason = \"missing fields\"\n return False\n\n now = int(time.time())\n if now < not_before or now >= expiry:\n self.failure_reason = \"token is expired\"\n return False\n\n return True\n\n\nclass Verifier:\n def __init__(self, macaroon, context, principals, permission):\n self.macaroon = macaroon\n self.context = context\n self.principals = principals\n self.permission = permission\n self.verifier = pymacaroons.Verifier()\n\n def verify(self, key):\n self.verifier.satisfy_general(V1Caveat(self))\n self.verifier.satisfy_general(ExpiryCaveat(self))\n\n try:\n return self.verifier.verify(self.macaroon, key)\n except (\n pymacaroons.exceptions.MacaroonInvalidSignatureException,\n Exception, # https://github.com/ecordell/pymacaroons/issues/50\n ):\n return False\n", "path": "warehouse/macaroons/caveats.py"}]} | 1,151 | 922 |
gh_patches_debug_37260 | rasdani/github-patches | git_diff | kubeflow__pipelines-4363 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow output artifact store configuration (vs hard coded)
It seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc. (`minio-service.kubeflow:9000`).
see: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148
It would be great to make it flexible, e.g. allow using S3, or change the namespace or bucket names.
I suggest making it configurable; I can do such a PR if we agree it's needed.
flexible pipeline service (host) path in client SDK
When creating an SDK `Client()`, the path to the `ml-pipeline` API service is loaded from a hard-coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicates a specific k8s namespace. It can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:
`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`
to:
`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`
Also note that when a user provides the `host` parameter, the IPython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`); this seems like a potential bug.
If it's acceptable, I can submit a PR for the line change above.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/kfp/aws.py`
Content:
```
1 # Copyright 2019 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY'):
16 """An operator that configures the container to use AWS credentials.
17
18 AWS doesn't create secret along with kubeflow deployment and it requires users
19 to manually create credential secret with proper permissions.
20
21 ::
22
23 apiVersion: v1
24 kind: Secret
25 metadata:
26 name: aws-secret
27 type: Opaque
28 data:
29 AWS_ACCESS_KEY_ID: BASE64_YOUR_AWS_ACCESS_KEY_ID
30 AWS_SECRET_ACCESS_KEY: BASE64_YOUR_AWS_SECRET_ACCESS_KEY
31 """
32
33 def _use_aws_secret(task):
34 from kubernetes import client as k8s_client
35 (
36 task.container
37 .add_env_variable(
38 k8s_client.V1EnvVar(
39 name='AWS_ACCESS_KEY_ID',
40 value_from=k8s_client.V1EnvVarSource(
41 secret_key_ref=k8s_client.V1SecretKeySelector(
42 name=secret_name,
43 key=aws_access_key_id_name
44 )
45 )
46 )
47 )
48 .add_env_variable(
49 k8s_client.V1EnvVar(
50 name='AWS_SECRET_ACCESS_KEY',
51 value_from=k8s_client.V1EnvVarSource(
52 secret_key_ref=k8s_client.V1SecretKeySelector(
53 name=secret_name,
54 key=aws_secret_access_key_name
55 )
56 )
57 )
58 )
59 )
60 return task
61
62 return _use_aws_secret
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/kfp/aws.py b/sdk/python/kfp/aws.py
--- a/sdk/python/kfp/aws.py
+++ b/sdk/python/kfp/aws.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY'):
+def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY', aws_region=None):
"""An operator that configures the container to use AWS credentials.
AWS doesn't create secret along with kubeflow deployment and it requires users
@@ -32,31 +32,38 @@
def _use_aws_secret(task):
from kubernetes import client as k8s_client
- (
- task.container
- .add_env_variable(
- k8s_client.V1EnvVar(
- name='AWS_ACCESS_KEY_ID',
- value_from=k8s_client.V1EnvVarSource(
- secret_key_ref=k8s_client.V1SecretKeySelector(
- name=secret_name,
- key=aws_access_key_id_name
- )
+ task.container \
+ .add_env_variable(
+ k8s_client.V1EnvVar(
+ name='AWS_ACCESS_KEY_ID',
+ value_from=k8s_client.V1EnvVarSource(
+ secret_key_ref=k8s_client.V1SecretKeySelector(
+ name=secret_name,
+ key=aws_access_key_id_name
)
)
)
+ ) \
+ .add_env_variable(
+ k8s_client.V1EnvVar(
+ name='AWS_SECRET_ACCESS_KEY',
+ value_from=k8s_client.V1EnvVarSource(
+ secret_key_ref=k8s_client.V1SecretKeySelector(
+ name=secret_name,
+ key=aws_secret_access_key_name
+ )
+ )
+ )
+ )
+
+ if aws_region:
+ task.container \
.add_env_variable(
k8s_client.V1EnvVar(
- name='AWS_SECRET_ACCESS_KEY',
- value_from=k8s_client.V1EnvVarSource(
- secret_key_ref=k8s_client.V1SecretKeySelector(
- name=secret_name,
- key=aws_secret_access_key_name
- )
- )
+ name='AWS_REGION',
+ value=aws_region
)
)
- )
return task
return _use_aws_secret
| {"golden_diff": "diff --git a/sdk/python/kfp/aws.py b/sdk/python/kfp/aws.py\n--- a/sdk/python/kfp/aws.py\n+++ b/sdk/python/kfp/aws.py\n@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY'):\n+def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY', aws_region=None):\n \"\"\"An operator that configures the container to use AWS credentials.\n \n AWS doesn't create secret along with kubeflow deployment and it requires users\n@@ -32,31 +32,38 @@\n \n def _use_aws_secret(task):\n from kubernetes import client as k8s_client\n- (\n- task.container\n- .add_env_variable(\n- k8s_client.V1EnvVar(\n- name='AWS_ACCESS_KEY_ID',\n- value_from=k8s_client.V1EnvVarSource(\n- secret_key_ref=k8s_client.V1SecretKeySelector(\n- name=secret_name,\n- key=aws_access_key_id_name\n- )\n+ task.container \\\n+ .add_env_variable(\n+ k8s_client.V1EnvVar(\n+ name='AWS_ACCESS_KEY_ID',\n+ value_from=k8s_client.V1EnvVarSource(\n+ secret_key_ref=k8s_client.V1SecretKeySelector(\n+ name=secret_name,\n+ key=aws_access_key_id_name\n )\n )\n )\n+ ) \\\n+ .add_env_variable(\n+ k8s_client.V1EnvVar(\n+ name='AWS_SECRET_ACCESS_KEY',\n+ value_from=k8s_client.V1EnvVarSource(\n+ secret_key_ref=k8s_client.V1SecretKeySelector(\n+ name=secret_name,\n+ key=aws_secret_access_key_name\n+ )\n+ )\n+ )\n+ )\n+\n+ if aws_region:\n+ task.container \\\n .add_env_variable(\n k8s_client.V1EnvVar(\n- name='AWS_SECRET_ACCESS_KEY',\n- value_from=k8s_client.V1EnvVarSource(\n- secret_key_ref=k8s_client.V1SecretKeySelector(\n- name=secret_name,\n- key=aws_secret_access_key_name\n- )\n- )\n+ name='AWS_REGION',\n+ value=aws_region\n )\n )\n- )\n return task\n \n return _use_aws_secret\n", "issue": "allow output artifact store configuration (vs hard coded)\nit seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`). \r\n\r\nsee: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148\r\n\r\nit would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.\r\ni suggest making it configurable, i can do such PR if we agree its needed. \nflexible pipeline service (host) path in client SDK \nwhen creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. 
changing the line in `_client.py` from:\r\n\r\n`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`\r\n\r\nto:\r\n\r\n`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`\r\n\r\nalso note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug\r\n\r\nif its acceptable i can submit a PR for the line change above\r\n \n", "before_files": [{"content": "# Copyright 2019 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ndef use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY'):\n \"\"\"An operator that configures the container to use AWS credentials.\n\n AWS doesn't create secret along with kubeflow deployment and it requires users\n to manually create credential secret with proper permissions.\n\n ::\n\n apiVersion: v1\n kind: Secret\n metadata:\n name: aws-secret\n type: Opaque\n data:\n AWS_ACCESS_KEY_ID: BASE64_YOUR_AWS_ACCESS_KEY_ID\n AWS_SECRET_ACCESS_KEY: BASE64_YOUR_AWS_SECRET_ACCESS_KEY\n \"\"\"\n\n def _use_aws_secret(task):\n from kubernetes import client as k8s_client\n (\n task.container\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='AWS_ACCESS_KEY_ID',\n value_from=k8s_client.V1EnvVarSource(\n secret_key_ref=k8s_client.V1SecretKeySelector(\n name=secret_name,\n key=aws_access_key_id_name\n )\n )\n )\n )\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='AWS_SECRET_ACCESS_KEY',\n value_from=k8s_client.V1EnvVarSource(\n secret_key_ref=k8s_client.V1SecretKeySelector(\n name=secret_name,\n key=aws_secret_access_key_name\n )\n )\n )\n )\n )\n return task\n\n return _use_aws_secret\n", "path": "sdk/python/kfp/aws.py"}], "after_files": [{"content": "# Copyright 2019 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ndef use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY', aws_region=None):\n \"\"\"An operator that configures the container to use AWS credentials.\n\n AWS doesn't create secret along with kubeflow deployment and it requires users\n to manually create credential secret with proper permissions.\n\n ::\n\n apiVersion: v1\n kind: Secret\n metadata:\n name: aws-secret\n type: Opaque\n data:\n AWS_ACCESS_KEY_ID: BASE64_YOUR_AWS_ACCESS_KEY_ID\n AWS_SECRET_ACCESS_KEY: 
BASE64_YOUR_AWS_SECRET_ACCESS_KEY\n \"\"\"\n\n def _use_aws_secret(task):\n from kubernetes import client as k8s_client\n task.container \\\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='AWS_ACCESS_KEY_ID',\n value_from=k8s_client.V1EnvVarSource(\n secret_key_ref=k8s_client.V1SecretKeySelector(\n name=secret_name,\n key=aws_access_key_id_name\n )\n )\n )\n ) \\\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='AWS_SECRET_ACCESS_KEY',\n value_from=k8s_client.V1EnvVarSource(\n secret_key_ref=k8s_client.V1SecretKeySelector(\n name=secret_name,\n key=aws_secret_access_key_name\n )\n )\n )\n )\n\n if aws_region:\n task.container \\\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='AWS_REGION',\n value=aws_region\n )\n )\n return task\n\n return _use_aws_secret\n", "path": "sdk/python/kfp/aws.py"}]} | 1,201 | 586 |
gh_patches_debug_4153 | rasdani/github-patches | git_diff | svthalia__concrexit-2510 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Thumbnailing of transparent images seems to break
### Describe the bug
<img width="1119" alt="image" src="https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png">
### How to reproduce
<img width="1119" alt="image" src="https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png">
### Expected behaviour
Not <img width="1119" alt="image" src="https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png">
### Screenshots
<img width="1119" alt="image" src="https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png">
### Additional context
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/utils/media/services.py`
Content:
```
1 import io
2 import os
3
4 from django.conf import settings
5 from django.core import signing
6 from django.core.files.base import ContentFile
7 from django.core.files.storage import get_storage_class, DefaultStorage
8 from django.core.files.uploadedfile import InMemoryUploadedFile
9 from django.db.models.fields.files import FieldFile, ImageFieldFile
10 from django.urls import reverse
11
12
13 def save_image(storage, image, path, format):
14 buffer = io.BytesIO()
15 image.convert("RGB").save(fp=buffer, format=format)
16 buff_val = buffer.getvalue()
17 content = ContentFile(buff_val)
18 file = InMemoryUploadedFile(
19 content,
20 None,
21 f"foo.{format.lower()}",
22 f"image/{format.lower()}",
23 content.tell,
24 None,
25 )
26 return storage.save(path, file)
27
28
29 def get_media_url(file, attachment=False):
30 """Get the url of the provided media file to serve in a browser.
31
32 If the file is private a signature will be added.
33 Do NOT use this with user input
34 :param file: the file field
35 :param attachment: True if the file is a forced download
36 :return: the url of the media
37 """
38 storage = DefaultStorage()
39 file_name = file
40 if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):
41 storage = file.storage
42 file_name = file.name
43
44 return f"{storage.url(file_name, attachment)}"
45
46
47 def get_thumbnail_url(file, size, fit=True):
48 """Get the thumbnail url of a media file, NEVER use this with user input.
49
50 If the thumbnail exists this function will return the url of the
51 media file, with signature if necessary. Does it not yet exist a route
52 that executes the :func:`utils.media.views.generate_thumbnail`
53 will be the output.
54 :param file: the file field
55 :param size: size of the image
56 :param fit: False to keep the aspect ratio, True to crop
57 :return: get-thumbnail path
58 """
59 storage = DefaultStorage()
60 name = file
61
62 if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):
63 storage = file.storage
64 name = file.name
65
66 is_public = isinstance(storage, get_storage_class(settings.PUBLIC_FILE_STORAGE))
67 size_fit = "{}_{}".format(size, int(fit))
68
69 if name.endswith(".svg") and is_public:
70 return storage.url(name)
71
72 sig_info = {
73 "size": size,
74 "fit": int(fit),
75 "name": name,
76 "thumb_path": f"thumbnails/{size_fit}/{name}",
77 "serve_path": f"thumbnails/{size_fit}/{name}",
78 "storage": f"{storage.__class__.__module__}.{storage.__class__.__name__}",
79 }
80
81 # We provide a URL instead of calling it as a function, so that using
82 # it means kicking off a new GET request. If we would need to check all files for the
83 # thumbnails inline, loading an album overview would have high latency.
84 return (
85 reverse("get-thumbnail", args=[os.path.join(size_fit, sig_info["name"])])
86 + f"?sig={signing.dumps(sig_info)}"
87 )
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/utils/media/services.py b/website/utils/media/services.py
--- a/website/utils/media/services.py
+++ b/website/utils/media/services.py
@@ -12,7 +12,7 @@
def save_image(storage, image, path, format):
buffer = io.BytesIO()
- image.convert("RGB").save(fp=buffer, format=format)
+ image.convert("RGB" if format == "JPEG" else "RGBA").save(fp=buffer, format=format)
buff_val = buffer.getvalue()
content = ContentFile(buff_val)
file = InMemoryUploadedFile(
| {"golden_diff": "diff --git a/website/utils/media/services.py b/website/utils/media/services.py\n--- a/website/utils/media/services.py\n+++ b/website/utils/media/services.py\n@@ -12,7 +12,7 @@\n \n def save_image(storage, image, path, format):\n buffer = io.BytesIO()\n- image.convert(\"RGB\").save(fp=buffer, format=format)\n+ image.convert(\"RGB\" if format == \"JPEG\" else \"RGBA\").save(fp=buffer, format=format)\n buff_val = buffer.getvalue()\n content = ContentFile(buff_val)\n file = InMemoryUploadedFile(\n", "issue": "Thumbnailing of transparent images seems to break\n### Describe the bug\r\n<img width=\"1119\" alt=\"image\" src=\"https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png\">\r\n\r\n### How to reproduce\r\n<img width=\"1119\" alt=\"image\" src=\"https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png\">\r\n\r\n### Expected behaviour\r\nNot <img width=\"1119\" alt=\"image\" src=\"https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png\">\r\n\r\n### Screenshots\r\n<img width=\"1119\" alt=\"image\" src=\"https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png\">\r\n\r\n### Additional context\r\n\n", "before_files": [{"content": "import io\nimport os\n\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import get_storage_class, DefaultStorage\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.db.models.fields.files import FieldFile, ImageFieldFile\nfrom django.urls import reverse\n\n\ndef save_image(storage, image, path, format):\n buffer = io.BytesIO()\n image.convert(\"RGB\").save(fp=buffer, format=format)\n buff_val = buffer.getvalue()\n content = ContentFile(buff_val)\n file = InMemoryUploadedFile(\n content,\n None,\n f\"foo.{format.lower()}\",\n f\"image/{format.lower()}\",\n content.tell,\n None,\n )\n return storage.save(path, file)\n\n\ndef get_media_url(file, attachment=False):\n \"\"\"Get the url of the provided media file to serve in a browser.\n\n If the file is private a signature will be added.\n Do NOT use this with user input\n :param file: the file field\n :param attachment: True if the file is a forced download\n :return: the url of the media\n \"\"\"\n storage = DefaultStorage()\n file_name = file\n if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):\n storage = file.storage\n file_name = file.name\n\n return f\"{storage.url(file_name, attachment)}\"\n\n\ndef get_thumbnail_url(file, size, fit=True):\n \"\"\"Get the thumbnail url of a media file, NEVER use this with user input.\n\n If the thumbnail exists this function will return the url of the\n media file, with signature if necessary. 
Does it not yet exist a route\n that executes the :func:`utils.media.views.generate_thumbnail`\n will be the output.\n :param file: the file field\n :param size: size of the image\n :param fit: False to keep the aspect ratio, True to crop\n :return: get-thumbnail path\n \"\"\"\n storage = DefaultStorage()\n name = file\n\n if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):\n storage = file.storage\n name = file.name\n\n is_public = isinstance(storage, get_storage_class(settings.PUBLIC_FILE_STORAGE))\n size_fit = \"{}_{}\".format(size, int(fit))\n\n if name.endswith(\".svg\") and is_public:\n return storage.url(name)\n\n sig_info = {\n \"size\": size,\n \"fit\": int(fit),\n \"name\": name,\n \"thumb_path\": f\"thumbnails/{size_fit}/{name}\",\n \"serve_path\": f\"thumbnails/{size_fit}/{name}\",\n \"storage\": f\"{storage.__class__.__module__}.{storage.__class__.__name__}\",\n }\n\n # We provide a URL instead of calling it as a function, so that using\n # it means kicking off a new GET request. If we would need to check all files for the\n # thumbnails inline, loading an album overview would have high latency.\n return (\n reverse(\"get-thumbnail\", args=[os.path.join(size_fit, sig_info[\"name\"])])\n + f\"?sig={signing.dumps(sig_info)}\"\n )\n", "path": "website/utils/media/services.py"}], "after_files": [{"content": "import io\nimport os\n\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import get_storage_class, DefaultStorage\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.db.models.fields.files import FieldFile, ImageFieldFile\nfrom django.urls import reverse\n\n\ndef save_image(storage, image, path, format):\n buffer = io.BytesIO()\n image.convert(\"RGB\" if format == \"JPEG\" else \"RGBA\").save(fp=buffer, format=format)\n buff_val = buffer.getvalue()\n content = ContentFile(buff_val)\n file = InMemoryUploadedFile(\n content,\n None,\n f\"foo.{format.lower()}\",\n f\"image/{format.lower()}\",\n content.tell,\n None,\n )\n return storage.save(path, file)\n\n\ndef get_media_url(file, attachment=False):\n \"\"\"Get the url of the provided media file to serve in a browser.\n\n If the file is private a signature will be added.\n Do NOT use this with user input\n :param file: the file field\n :param attachment: True if the file is a forced download\n :return: the url of the media\n \"\"\"\n storage = DefaultStorage()\n file_name = file\n if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):\n storage = file.storage\n file_name = file.name\n\n return f\"{storage.url(file_name, attachment)}\"\n\n\ndef get_thumbnail_url(file, size, fit=True):\n \"\"\"Get the thumbnail url of a media file, NEVER use this with user input.\n\n If the thumbnail exists this function will return the url of the\n media file, with signature if necessary. 
Does it not yet exist a route\n that executes the :func:`utils.media.views.generate_thumbnail`\n will be the output.\n :param file: the file field\n :param size: size of the image\n :param fit: False to keep the aspect ratio, True to crop\n :return: get-thumbnail path\n \"\"\"\n storage = DefaultStorage()\n name = file\n\n if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):\n storage = file.storage\n name = file.name\n\n is_public = isinstance(storage, get_storage_class(settings.PUBLIC_FILE_STORAGE))\n size_fit = \"{}_{}\".format(size, int(fit))\n\n if name.endswith(\".svg\") and is_public:\n return storage.url(name)\n\n sig_info = {\n \"size\": size,\n \"fit\": int(fit),\n \"name\": name,\n \"thumb_path\": f\"thumbnails/{size_fit}/{name}\",\n \"serve_path\": f\"thumbnails/{size_fit}/{name}\",\n \"storage\": f\"{storage.__class__.__module__}.{storage.__class__.__name__}\",\n }\n\n # We provide a URL instead of calling it as a function, so that using\n # it means kicking off a new GET request. If we would need to check all files for the\n # thumbnails inline, loading an album overview would have high latency.\n return (\n reverse(\"get-thumbnail\", args=[os.path.join(size_fit, sig_info[\"name\"])])\n + f\"?sig={signing.dumps(sig_info)}\"\n )\n", "path": "website/utils/media/services.py"}]} | 1,445 | 130 |
gh_patches_debug_26436 | rasdani/github-patches | git_diff | Textualize__textual-1066 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Header's title text isn't centered properly
> Please give a brief but clear explanation of what the issue is. Let us know what the behaviour you expect is, and what is actually happening. Let us know what operating system you are running on, and what terminal you are using.
`Header`'s title text isn't centered; `show_clock=True` exacerbates the issue. My expectation is that the title is centered within the visible space between the icon/clock (if shown), and between the icon/right edge if not.
> Feel free to add screenshots and/or videos. These can be very helpful!



> If you can, include a complete working example that demonstrates the bug. Please check it can run without modifications.
```python
from textual.app import App, ComposeResult
from textual.widgets import Header, Static
class Demo(App):
TITLE = "Demonstration"
CSS = """
Screen {
layout: grid;
grid-size: 2;
}
.box {
height: 100%;
border: white;
}
"""
def compose(self) -> ComposeResult:
yield Header(show_clock=True)
yield Static(classes="box")
yield Static(classes="box")
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_header.py`
Content:
```
1 from __future__ import annotations
2
3 from datetime import datetime
4
5 from rich.text import Text
6
7 from ..widget import Widget
8 from ..reactive import Reactive, watch
9
10
11 class HeaderIcon(Widget):
12 """Display an 'icon' on the left of the header."""
13
14 DEFAULT_CSS = """
15 HeaderIcon {
16 dock: left;
17 padding: 0 1;
18 width: 8;
19 content-align: left middle;
20 }
21 """
22 icon = Reactive("⭘")
23
24 def render(self):
25 return self.icon
26
27
28 class HeaderClock(Widget):
29 """Display a clock on the right of the header."""
30
31 DEFAULT_CSS = """
32 HeaderClock {
33 dock: right;
34 width: 10;
35 padding: 0 1;
36 background: $secondary-background-lighten-1;
37 color: $text;
38 text-opacity: 85%;
39 content-align: center middle;
40 }
41 """
42
43 def on_mount(self) -> None:
44 self.set_interval(1, callback=self.refresh, name=f"update header clock")
45
46 def render(self):
47 return Text(datetime.now().time().strftime("%X"))
48
49
50 class HeaderTitle(Widget):
51 """Display the title / subtitle in the header."""
52
53 DEFAULT_CSS = """
54 HeaderTitle {
55 content-align: center middle;
56 width: 100%;
57 margin-right: 10;
58 }
59 """
60
61 text: Reactive[str] = Reactive("")
62 sub_text = Reactive("")
63
64 def render(self) -> Text:
65 text = Text(self.text, no_wrap=True, overflow="ellipsis")
66 if self.sub_text:
67 text.append(" — ")
68 text.append(self.sub_text, "dim")
69 return text
70
71
72 class Header(Widget):
73 """A header widget with icon and clock.
74
75 Args:
76 show_clock (bool, optional): True if the clock should be shown on the right of the header.
77 """
78
79 DEFAULT_CSS = """
80 Header {
81 dock: top;
82 width: 100%;
83 background: $secondary-background;
84 color: $text;
85 height: 1;
86 }
87 Header.-tall {
88 height: 3;
89 }
90 """
91
92 tall = Reactive(False)
93
94 DEFAULT_CLASSES = ""
95
96 def __init__(
97 self,
98 show_clock: bool = False,
99 *,
100 name: str | None = None,
101 id: str | None = None,
102 classes: str | None = None,
103 ):
104 super().__init__(name=name, id=id, classes=classes)
105 self.show_clock = show_clock
106
107 def compose(self):
108 yield HeaderIcon()
109 yield HeaderTitle()
110 if self.show_clock:
111 yield HeaderClock()
112
113 def watch_tall(self, tall: bool) -> None:
114 self.set_class(tall, "-tall")
115
116 def on_click(self):
117 self.toggle_class("-tall")
118
119 def on_mount(self) -> None:
120 def set_title(title: str) -> None:
121 self.query_one(HeaderTitle).text = title
122
123 def set_sub_title(sub_title: str) -> None:
124 self.query_one(HeaderTitle).sub_text = sub_title
125
126 watch(self.app, "title", set_title)
127 watch(self.app, "sub_title", set_sub_title)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/widgets/_header.py b/src/textual/widgets/_header.py
--- a/src/textual/widgets/_header.py
+++ b/src/textual/widgets/_header.py
@@ -25,14 +25,26 @@
return self.icon
-class HeaderClock(Widget):
- """Display a clock on the right of the header."""
+class HeaderClockSpace(Widget):
+ """The space taken up by the clock on the right of the header."""
DEFAULT_CSS = """
- HeaderClock {
+ HeaderClockSpace {
dock: right;
width: 10;
padding: 0 1;
+ }
+ """
+
+ def render(self) -> str:
+ return ""
+
+
+class HeaderClock(HeaderClockSpace):
+ """Display a clock on the right of the header."""
+
+ DEFAULT_CSS = """
+ HeaderClock {
background: $secondary-background-lighten-1;
color: $text;
text-opacity: 85%;
@@ -54,7 +66,6 @@
HeaderTitle {
content-align: center middle;
width: 100%;
- margin-right: 10;
}
"""
@@ -107,8 +118,7 @@
def compose(self):
yield HeaderIcon()
yield HeaderTitle()
- if self.show_clock:
- yield HeaderClock()
+ yield HeaderClock() if self.show_clock else HeaderClockSpace()
def watch_tall(self, tall: bool) -> None:
self.set_class(tall, "-tall")
| {"golden_diff": "diff --git a/src/textual/widgets/_header.py b/src/textual/widgets/_header.py\n--- a/src/textual/widgets/_header.py\n+++ b/src/textual/widgets/_header.py\n@@ -25,14 +25,26 @@\n return self.icon\n \n \n-class HeaderClock(Widget):\n- \"\"\"Display a clock on the right of the header.\"\"\"\n+class HeaderClockSpace(Widget):\n+ \"\"\"The space taken up by the clock on the right of the header.\"\"\"\n \n DEFAULT_CSS = \"\"\"\n- HeaderClock {\n+ HeaderClockSpace {\n dock: right;\n width: 10;\n padding: 0 1;\n+ }\n+ \"\"\"\n+\n+ def render(self) -> str:\n+ return \"\"\n+\n+\n+class HeaderClock(HeaderClockSpace):\n+ \"\"\"Display a clock on the right of the header.\"\"\"\n+\n+ DEFAULT_CSS = \"\"\"\n+ HeaderClock {\n background: $secondary-background-lighten-1;\n color: $text;\n text-opacity: 85%;\n@@ -54,7 +66,6 @@\n HeaderTitle {\n content-align: center middle;\n width: 100%;\n- margin-right: 10;\n }\n \"\"\"\n \n@@ -107,8 +118,7 @@\n def compose(self):\n yield HeaderIcon()\n yield HeaderTitle()\n- if self.show_clock:\n- yield HeaderClock()\n+ yield HeaderClock() if self.show_clock else HeaderClockSpace()\n \n def watch_tall(self, tall: bool) -> None:\n self.set_class(tall, \"-tall\")\n", "issue": "[BUG] Header's title text isn't centered properly \n> Please give a brief but clear explanation of what the issue is. Let us know what the behaviour you expect is, and what is actually happening. Let us know what operating system you are running on, and what terminal you are using.\r\n\r\n`Header`'s title text isn't centered, `show_clock=True` exacerbates the issue. My expectation is that the title is centered within the visible space between the icon/clock (if shown), and between the icon/right edge if not.\r\n\r\n> Feel free to add screenshots and/or videos. These can be very helpful!\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n> If you can, include a complete working example that demonstrates the bug. 
Please check it can run without modifications.\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import Header, Static\r\n\r\nclass Demo(App):\r\n TITLE = \"Demonstration\"\r\n CSS = \"\"\"\r\n Screen {\r\n layout: grid;\r\n grid-size: 2;\r\n }\r\n .box {\r\n height: 100%;\r\n border: white;\r\n }\r\n \"\"\"\r\n\r\n def compose(self) -> ComposeResult:\r\n yield Header(show_clock=True)\r\n yield Static(classes=\"box\")\r\n yield Static(classes=\"box\")\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom datetime import datetime\n\nfrom rich.text import Text\n\nfrom ..widget import Widget\nfrom ..reactive import Reactive, watch\n\n\nclass HeaderIcon(Widget):\n \"\"\"Display an 'icon' on the left of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderIcon {\n dock: left;\n padding: 0 1;\n width: 8;\n content-align: left middle;\n }\n \"\"\"\n icon = Reactive(\"\u2b58\")\n\n def render(self):\n return self.icon\n\n\nclass HeaderClock(Widget):\n \"\"\"Display a clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClock {\n dock: right;\n width: 10;\n padding: 0 1;\n background: $secondary-background-lighten-1;\n color: $text;\n text-opacity: 85%;\n content-align: center middle;\n }\n \"\"\"\n\n def on_mount(self) -> None:\n self.set_interval(1, callback=self.refresh, name=f\"update header clock\")\n\n def render(self):\n return Text(datetime.now().time().strftime(\"%X\"))\n\n\nclass HeaderTitle(Widget):\n \"\"\"Display the title / subtitle in the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderTitle {\n content-align: center middle;\n width: 100%;\n margin-right: 10;\n }\n \"\"\"\n\n text: Reactive[str] = Reactive(\"\")\n sub_text = Reactive(\"\")\n\n def render(self) -> Text:\n text = Text(self.text, no_wrap=True, overflow=\"ellipsis\")\n if self.sub_text:\n text.append(\" \u2014 \")\n text.append(self.sub_text, \"dim\")\n return text\n\n\nclass Header(Widget):\n \"\"\"A header widget with icon and clock.\n\n Args:\n show_clock (bool, optional): True if the clock should be shown on the right of the header.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Header {\n dock: top;\n width: 100%;\n background: $secondary-background;\n color: $text;\n height: 1;\n }\n Header.-tall {\n height: 3;\n }\n \"\"\"\n\n tall = Reactive(False)\n\n DEFAULT_CLASSES = \"\"\n\n def __init__(\n self,\n show_clock: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ):\n super().__init__(name=name, id=id, classes=classes)\n self.show_clock = show_clock\n\n def compose(self):\n yield HeaderIcon()\n yield HeaderTitle()\n if self.show_clock:\n yield HeaderClock()\n\n def watch_tall(self, tall: bool) -> None:\n self.set_class(tall, \"-tall\")\n\n def on_click(self):\n self.toggle_class(\"-tall\")\n\n def on_mount(self) -> None:\n def set_title(title: str) -> None:\n self.query_one(HeaderTitle).text = title\n\n def set_sub_title(sub_title: str) -> None:\n self.query_one(HeaderTitle).sub_text = sub_title\n\n watch(self.app, \"title\", set_title)\n watch(self.app, \"sub_title\", set_sub_title)\n", "path": "src/textual/widgets/_header.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom datetime import datetime\n\nfrom rich.text import Text\n\nfrom ..widget import Widget\nfrom ..reactive import Reactive, watch\n\n\nclass HeaderIcon(Widget):\n \"\"\"Display an 'icon' on the left of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderIcon {\n dock: left;\n padding: 0 1;\n 
width: 8;\n content-align: left middle;\n }\n \"\"\"\n icon = Reactive(\"\u2b58\")\n\n def render(self):\n return self.icon\n\n\nclass HeaderClockSpace(Widget):\n \"\"\"The space taken up by the clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClockSpace {\n dock: right;\n width: 10;\n padding: 0 1;\n }\n \"\"\"\n\n def render(self) -> str:\n return \"\"\n\n\nclass HeaderClock(HeaderClockSpace):\n \"\"\"Display a clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClock {\n background: $secondary-background-lighten-1;\n color: $text;\n text-opacity: 85%;\n content-align: center middle;\n }\n \"\"\"\n\n def on_mount(self) -> None:\n self.set_interval(1, callback=self.refresh, name=f\"update header clock\")\n\n def render(self):\n return Text(datetime.now().time().strftime(\"%X\"))\n\n\nclass HeaderTitle(Widget):\n \"\"\"Display the title / subtitle in the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderTitle {\n content-align: center middle;\n width: 100%;\n }\n \"\"\"\n\n text: Reactive[str] = Reactive(\"\")\n sub_text = Reactive(\"\")\n\n def render(self) -> Text:\n text = Text(self.text, no_wrap=True, overflow=\"ellipsis\")\n if self.sub_text:\n text.append(\" \u2014 \")\n text.append(self.sub_text, \"dim\")\n return text\n\n\nclass Header(Widget):\n \"\"\"A header widget with icon and clock.\n\n Args:\n show_clock (bool, optional): True if the clock should be shown on the right of the header.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Header {\n dock: top;\n width: 100%;\n background: $secondary-background;\n color: $text;\n height: 1;\n }\n Header.-tall {\n height: 3;\n }\n \"\"\"\n\n tall = Reactive(False)\n\n DEFAULT_CLASSES = \"\"\n\n def __init__(\n self,\n show_clock: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ):\n super().__init__(name=name, id=id, classes=classes)\n self.show_clock = show_clock\n\n def compose(self):\n yield HeaderIcon()\n yield HeaderTitle()\n yield HeaderClock() if self.show_clock else HeaderClockSpace()\n\n def watch_tall(self, tall: bool) -> None:\n self.set_class(tall, \"-tall\")\n\n def on_click(self):\n self.toggle_class(\"-tall\")\n\n def on_mount(self) -> None:\n def set_title(title: str) -> None:\n self.query_one(HeaderTitle).text = title\n\n def set_sub_title(sub_title: str) -> None:\n self.query_one(HeaderTitle).sub_text = sub_title\n\n watch(self.app, \"title\", set_title)\n watch(self.app, \"sub_title\", set_sub_title)\n", "path": "src/textual/widgets/_header.py"}]} | 1,693 | 350 |
gh_patches_debug_38859 | rasdani/github-patches | git_diff | ESMCI__cime-1857 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SystemTestsCompareTwo multisubmit tries to do too much in phase 1
In comparing #1830 with what made it to master, I noticed that the indentation of this block is wrong:
```python
# Compare results
# Case1 is the "main" case, and we need to do the comparisons from there
self._activate_case1()
self._link_to_case2_output()
self._component_compare_test(self._run_one_suffix, self._run_two_suffix, success_change=success_change)
```
-- this should be indented under the "Second run" conditional.
The current indentation leads the ERR test (and any other multi-submit test) to try to do component_compare_test after the first phase, leading to a FAIL result. This doesn't cause a test failure, because the FAIL is later overwritten with a PASS, but it is still incorrect.
I have a fix for this in an incoming PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/CIME/SystemTests/erp.py`
Content:
```
1 """
2 CIME ERP test. This class inherits from SystemTestsCompareTwo
3
4 This is a pes counts hybrid (open-MP/MPI) restart bfb test from
5 startup. This is just like an ERS test but the pe-counts/threading
6 count are modified on retart.
7 (1) Do an initial run with pes set up out of the box (suffix base)
8 (2) Do a restart test with half the number of tasks and threads (suffix rest)
9 """
10
11 from CIME.XML.standard_module_setup import *
12 from CIME.case_setup import case_setup
13 from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
14 from CIME.check_lockedfiles import *
15
16 logger = logging.getLogger(__name__)
17
18 class ERP(SystemTestsCompareTwo):
19
20 def __init__(self, case):
21 """
22 initialize a test object
23 """
24 SystemTestsCompareTwo.__init__(self, case,
25 separate_builds = True,
26 run_two_suffix = 'rest',
27 run_one_description = 'initial',
28 run_two_description = 'restart')
29
30 def _common_setup(self):
31 self._case.set_value("BUILD_THREADED",True)
32
33 def _case_one_setup(self):
34 stop_n = self._case.get_value("STOP_N")
35
36 expect(stop_n > 2, "ERROR: stop_n value {:d} too short".format(stop_n))
37
38 def _case_two_setup(self):
39 # halve the number of tasks and threads
40 for comp in self._case.get_values("COMP_CLASSES"):
41 ntasks = self._case1.get_value("NTASKS_{}".format(comp))
42 nthreads = self._case1.get_value("NTHRDS_{}".format(comp))
43 rootpe = self._case1.get_value("ROOTPE_{}".format(comp))
44 if ( nthreads > 1 ):
45 self._case.set_value("NTHRDS_{}".format(comp), nthreads/2)
46 if ( ntasks > 1 ):
47 self._case.set_value("NTASKS_{}".format(comp), ntasks/2)
48 self._case.set_value("ROOTPE_{}".format(comp), rootpe/2)
49
50 stop_n = self._case1.get_value("STOP_N")
51 rest_n = self._case1.get_value("REST_N")
52 stop_new = stop_n - rest_n
53 expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,stop_n,rest_n))
54 self._case.set_value("STOP_N", stop_new)
55 self._case.set_value("HIST_N", stop_n)
56 self._case.set_value("CONTINUE_RUN", True)
57 self._case.set_value("REST_OPTION","never")
58
59 # Note, some components, like CESM-CICE, have
60 # decomposition information in env_build.xml that
61 # needs to be regenerated for the above new tasks and thread counts
62 case_setup(self._case, test_mode=True, reset=True)
63
64 def _case_one_custom_postrun_action(self):
65 self.copy_case1_restarts_to_case2()
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/lib/CIME/SystemTests/erp.py b/scripts/lib/CIME/SystemTests/erp.py
--- a/scripts/lib/CIME/SystemTests/erp.py
+++ b/scripts/lib/CIME/SystemTests/erp.py
@@ -1,5 +1,5 @@
"""
-CIME ERP test. This class inherits from SystemTestsCompareTwo
+CIME ERP test. This class inherits from RestartTest
This is a pes counts hybrid (open-MP/MPI) restart bfb test from
startup. This is just like an ERS test but the pe-counts/threading
@@ -10,31 +10,26 @@
from CIME.XML.standard_module_setup import *
from CIME.case_setup import case_setup
-from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
+from CIME.SystemTests.restart_tests import RestartTest
from CIME.check_lockedfiles import *
logger = logging.getLogger(__name__)
-class ERP(SystemTestsCompareTwo):
+class ERP(RestartTest):
def __init__(self, case):
"""
initialize a test object
"""
- SystemTestsCompareTwo.__init__(self, case,
- separate_builds = True,
- run_two_suffix = 'rest',
- run_one_description = 'initial',
- run_two_description = 'restart')
+ RestartTest.__init__(self, case,
+ separate_builds = True,
+ run_two_suffix = 'rest',
+ run_one_description = 'initial',
+ run_two_description = 'restart')
def _common_setup(self):
self._case.set_value("BUILD_THREADED",True)
- def _case_one_setup(self):
- stop_n = self._case.get_value("STOP_N")
-
- expect(stop_n > 2, "ERROR: stop_n value {:d} too short".format(stop_n))
-
def _case_two_setup(self):
# halve the number of tasks and threads
for comp in self._case.get_values("COMP_CLASSES"):
@@ -47,15 +42,7 @@
self._case.set_value("NTASKS_{}".format(comp), ntasks/2)
self._case.set_value("ROOTPE_{}".format(comp), rootpe/2)
- stop_n = self._case1.get_value("STOP_N")
- rest_n = self._case1.get_value("REST_N")
- stop_new = stop_n - rest_n
- expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,stop_n,rest_n))
- self._case.set_value("STOP_N", stop_new)
- self._case.set_value("HIST_N", stop_n)
- self._case.set_value("CONTINUE_RUN", True)
- self._case.set_value("REST_OPTION","never")
-
+ RestartTest._case_two_setup(self)
# Note, some components, like CESM-CICE, have
# decomposition information in env_build.xml that
# needs to be regenerated for the above new tasks and thread counts
| {"golden_diff": "diff --git a/scripts/lib/CIME/SystemTests/erp.py b/scripts/lib/CIME/SystemTests/erp.py\n--- a/scripts/lib/CIME/SystemTests/erp.py\n+++ b/scripts/lib/CIME/SystemTests/erp.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-CIME ERP test. This class inherits from SystemTestsCompareTwo\n+CIME ERP test. This class inherits from RestartTest\n \n This is a pes counts hybrid (open-MP/MPI) restart bfb test from\n startup. This is just like an ERS test but the pe-counts/threading\n@@ -10,31 +10,26 @@\n \n from CIME.XML.standard_module_setup import *\n from CIME.case_setup import case_setup\n-from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo\n+from CIME.SystemTests.restart_tests import RestartTest\n from CIME.check_lockedfiles import *\n \n logger = logging.getLogger(__name__)\n \n-class ERP(SystemTestsCompareTwo):\n+class ERP(RestartTest):\n \n def __init__(self, case):\n \"\"\"\n initialize a test object\n \"\"\"\n- SystemTestsCompareTwo.__init__(self, case,\n- separate_builds = True,\n- run_two_suffix = 'rest',\n- run_one_description = 'initial',\n- run_two_description = 'restart')\n+ RestartTest.__init__(self, case,\n+ separate_builds = True,\n+ run_two_suffix = 'rest',\n+ run_one_description = 'initial',\n+ run_two_description = 'restart')\n \n def _common_setup(self):\n self._case.set_value(\"BUILD_THREADED\",True)\n \n- def _case_one_setup(self):\n- stop_n = self._case.get_value(\"STOP_N\")\n-\n- expect(stop_n > 2, \"ERROR: stop_n value {:d} too short\".format(stop_n))\n-\n def _case_two_setup(self):\n # halve the number of tasks and threads\n for comp in self._case.get_values(\"COMP_CLASSES\"):\n@@ -47,15 +42,7 @@\n self._case.set_value(\"NTASKS_{}\".format(comp), ntasks/2)\n self._case.set_value(\"ROOTPE_{}\".format(comp), rootpe/2)\n \n- stop_n = self._case1.get_value(\"STOP_N\")\n- rest_n = self._case1.get_value(\"REST_N\")\n- stop_new = stop_n - rest_n\n- expect(stop_new > 0, \"ERROR: stop_n value {:d} too short {:d} {:d}\".format(stop_new,stop_n,rest_n))\n- self._case.set_value(\"STOP_N\", stop_new)\n- self._case.set_value(\"HIST_N\", stop_n)\n- self._case.set_value(\"CONTINUE_RUN\", True)\n- self._case.set_value(\"REST_OPTION\",\"never\")\n-\n+ RestartTest._case_two_setup(self)\n # Note, some components, like CESM-CICE, have\n # decomposition information in env_build.xml that\n # needs to be regenerated for the above new tasks and thread counts\n", "issue": "SystemTestsCompareTwo multisubmit tries to do too much in phase 1\nIn comparing #1830 with what made it to master, I noticed that the indentation of this block is wrong:\r\n\r\n```python\r\n # Compare results\r\n # Case1 is the \"main\" case, and we need to do the comparisons from there\r\n self._activate_case1()\r\n self._link_to_case2_output()\r\n\r\n self._component_compare_test(self._run_one_suffix, self._run_two_suffix, success_change=success_change)\r\n```\r\n\r\n-- this should be indented under the \"Second run\" conditional.\r\n\r\nThe current indentation leads the ERR test (and any other multi-submit test) to try to do component_compare_test after the first phase, leading to a FAIL result. This doesn't cause a test failure, because the FAIL is later overwritten with a PASS, but it is still incorrect.\r\n\r\nI have a fix for this in an incoming PR.\n", "before_files": [{"content": "\"\"\"\nCIME ERP test. This class inherits from SystemTestsCompareTwo\n\nThis is a pes counts hybrid (open-MP/MPI) restart bfb test from\nstartup. 
This is just like an ERS test but the pe-counts/threading\ncount are modified on retart.\n(1) Do an initial run with pes set up out of the box (suffix base)\n(2) Do a restart test with half the number of tasks and threads (suffix rest)\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.case_setup import case_setup\nfrom CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo\nfrom CIME.check_lockedfiles import *\n\nlogger = logging.getLogger(__name__)\n\nclass ERP(SystemTestsCompareTwo):\n\n def __init__(self, case):\n \"\"\"\n initialize a test object\n \"\"\"\n SystemTestsCompareTwo.__init__(self, case,\n separate_builds = True,\n run_two_suffix = 'rest',\n run_one_description = 'initial',\n run_two_description = 'restart')\n\n def _common_setup(self):\n self._case.set_value(\"BUILD_THREADED\",True)\n\n def _case_one_setup(self):\n stop_n = self._case.get_value(\"STOP_N\")\n\n expect(stop_n > 2, \"ERROR: stop_n value {:d} too short\".format(stop_n))\n\n def _case_two_setup(self):\n # halve the number of tasks and threads\n for comp in self._case.get_values(\"COMP_CLASSES\"):\n ntasks = self._case1.get_value(\"NTASKS_{}\".format(comp))\n nthreads = self._case1.get_value(\"NTHRDS_{}\".format(comp))\n rootpe = self._case1.get_value(\"ROOTPE_{}\".format(comp))\n if ( nthreads > 1 ):\n self._case.set_value(\"NTHRDS_{}\".format(comp), nthreads/2)\n if ( ntasks > 1 ):\n self._case.set_value(\"NTASKS_{}\".format(comp), ntasks/2)\n self._case.set_value(\"ROOTPE_{}\".format(comp), rootpe/2)\n\n stop_n = self._case1.get_value(\"STOP_N\")\n rest_n = self._case1.get_value(\"REST_N\")\n stop_new = stop_n - rest_n\n expect(stop_new > 0, \"ERROR: stop_n value {:d} too short {:d} {:d}\".format(stop_new,stop_n,rest_n))\n self._case.set_value(\"STOP_N\", stop_new)\n self._case.set_value(\"HIST_N\", stop_n)\n self._case.set_value(\"CONTINUE_RUN\", True)\n self._case.set_value(\"REST_OPTION\",\"never\")\n\n # Note, some components, like CESM-CICE, have\n # decomposition information in env_build.xml that\n # needs to be regenerated for the above new tasks and thread counts\n case_setup(self._case, test_mode=True, reset=True)\n\n def _case_one_custom_postrun_action(self):\n self.copy_case1_restarts_to_case2()\n", "path": "scripts/lib/CIME/SystemTests/erp.py"}], "after_files": [{"content": "\"\"\"\nCIME ERP test. This class inherits from RestartTest\n\nThis is a pes counts hybrid (open-MP/MPI) restart bfb test from\nstartup. 
This is just like an ERS test but the pe-counts/threading\ncount are modified on retart.\n(1) Do an initial run with pes set up out of the box (suffix base)\n(2) Do a restart test with half the number of tasks and threads (suffix rest)\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.case_setup import case_setup\nfrom CIME.SystemTests.restart_tests import RestartTest\nfrom CIME.check_lockedfiles import *\n\nlogger = logging.getLogger(__name__)\n\nclass ERP(RestartTest):\n\n def __init__(self, case):\n \"\"\"\n initialize a test object\n \"\"\"\n RestartTest.__init__(self, case,\n separate_builds = True,\n run_two_suffix = 'rest',\n run_one_description = 'initial',\n run_two_description = 'restart')\n\n def _common_setup(self):\n self._case.set_value(\"BUILD_THREADED\",True)\n\n def _case_two_setup(self):\n # halve the number of tasks and threads\n for comp in self._case.get_values(\"COMP_CLASSES\"):\n ntasks = self._case1.get_value(\"NTASKS_{}\".format(comp))\n nthreads = self._case1.get_value(\"NTHRDS_{}\".format(comp))\n rootpe = self._case1.get_value(\"ROOTPE_{}\".format(comp))\n if ( nthreads > 1 ):\n self._case.set_value(\"NTHRDS_{}\".format(comp), nthreads/2)\n if ( ntasks > 1 ):\n self._case.set_value(\"NTASKS_{}\".format(comp), ntasks/2)\n self._case.set_value(\"ROOTPE_{}\".format(comp), rootpe/2)\n\n RestartTest._case_two_setup(self)\n # Note, some components, like CESM-CICE, have\n # decomposition information in env_build.xml that\n # needs to be regenerated for the above new tasks and thread counts\n case_setup(self._case, test_mode=True, reset=True)\n\n def _case_one_custom_postrun_action(self):\n self.copy_case1_restarts_to_case2()\n", "path": "scripts/lib/CIME/SystemTests/erp.py"}]} | 1,247 | 674 |
gh_patches_debug_15146 | rasdani/github-patches | git_diff | sunpy__sunpy-723 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make Glymur an optional dependency
This is "make io imports optional"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """
2 SunPy: Python for Solar Physics
3
4 The SunPy project is an effort to create an open-source software library for
5 solar physics using the Python programming language.
6 """
7 DOCLINES = __doc__.split("\n")
8
9 CLASSIFIERS = [
10 'Development Status :: 3 - Alpha',
11 'Intended Audience :: Science/Research',
12 'Intended Audience :: Developers',
13 'License :: OSI Approved :: BSD License',
14 'Programming Language :: Python',
15 'Programming Language :: Python :: 3',
16 'Topic :: Software Development',
17 'Topic :: Scientific/Engineering',
18 'Topic :: Scientific/Engineering :: Physics',
19 'Operating System :: Microsoft :: Windows',
20 'Operating System :: POSIX',
21 'Operating System :: Unix',
22 'Operating System :: MacOS'
23 ]
24
25 VERSION = '0.3.2'
26
27 def git_description():
28 import subprocess
29 try:
30 out = subprocess.Popen(['git', 'describe', '--tags'], stdout = subprocess.PIPE).communicate()[0]
31 description = out.strip().decode('ascii')
32 except OSError:
33 description = 'Error: could not run git'
34 return description
35
36 def write_version_py():
37 import os
38 if os.path.exists('.git'):
39 GIT_DESCRIPTION = git_description()
40 else:
41 GIT_DESCRIPTION = 'N/A'
42
43 out = open('sunpy/version.py', 'w')
44 template = """# This file is automatically generated by SunPy's setup.py
45 version = '%(version)s'
46 git_description = '%(git_description)s'
47 """
48 try:
49 out.write(template % {'version': VERSION,
50 'git_description': GIT_DESCRIPTION})
51 finally:
52 out.close()
53
54
55 def install(setup): #pylint: disable=W0621
56 from setuptools import find_packages
57 #Crotate Module
58 from distutils.core import Extension
59 from os.path import dirname, join
60 cwd = dirname(__file__)
61 try:
62 import numpy as np
63 except ImportError:
64 print("SunPy WARNING: NumPy must be installed first to build the C extension")
65
66 if 'np' in locals():
67 module = 'sunpy.image.Crotate' # import this
68 sourcefiles = [join(cwd, 'sunpy', 'image', 'src', 'rot_extn.c'),
69 join(cwd, 'sunpy', 'image', 'src', 'transform', 'aff_tr.c')]
70 libs = ['m']
71 # -ON for compile optimise
72 gcc_args = ['-std=c99', '-O3']
73 # gcc_args = ['-std=c99']
74
75 # need *module* name here
76 crotate = Extension(module,
77 sources = sourcefiles,
78 libraries = libs,
79 extra_compile_args = gcc_args,
80 include_dirs =
81 [np.get_include(), join(cwd, 'sunpy', 'image', 'src')]
82 )
83
84 module_ana = 'sunpy.io._pyana'
85 sourcefiles_ana = [join(cwd, 'sunpy', 'io', 'src', 'ana', 'anacompress.c'),
86 join(cwd, 'sunpy', 'io', 'src', 'ana', 'anadecompress.c'),
87 join(cwd, 'sunpy', 'io', 'src', 'ana', 'anarw.c'),
88 join(cwd, 'sunpy', 'io', 'src', 'ana', 'testrw.c'),
89 join(cwd, 'sunpy', 'io', 'src', 'ana', '_pyana.c')]
90
91 ana = Extension(module_ana,
92 sources = sourcefiles_ana,
93 libraries = libs,
94 extra_compile_args = gcc_args,
95 include_dirs =
96 [np.get_include(), join(cwd, 'sunpy', 'io', 'src')]
97 )
98 ext_modules = []
99 if 'crotate' in locals():
100 ext_modules.append(crotate)
101 if 'ana' in locals():
102 ext_modules.append(ana)
103
104 write_version_py()
105
106 setup(
107 author="Steven Christe, Matt Earnshaw, Russell Hewett, Keith Hughitt, Jack Ireland, Florian Mayer, Stuart Mumford, Albert Shih, David Perez-Suarez et. al",
108 author_email="[email protected]",
109 classifiers=CLASSIFIERS,
110 description=DOCLINES[0],
111 download_url="http://www.sunpy.org/download/",
112 # 2011/11/21: disabling for now to prevent paver warnings
113 #extra_requires={
114 # "Plotman": ['PyQt4']
115 #},
116 install_requires=[
117 'numpy>1.6.0',
118 'astropy>=0.3.0',
119 'scipy',
120 # 'suds',
121 'pandas>=0.10.0',
122 'matplotlib>=1.1',
123 'glymur>=0.5.9',
124 # 'beautifulsoup4',
125 'sqlalchemy',
126 ],
127 license="BSD",
128 long_description="\n".join(DOCLINES[2:]),
129 maintainer="SunPy Developers",
130 maintainer_email="[email protected]",
131 name="sunpy",
132 packages=find_packages(),
133 package_data={'': ['*.fits', '*.fit', 'sunpyrc']},
134 platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
135 provides=['sunpy'],
136 url="http://www.sunpy.org/",
137 use_2to3=True,
138 version=VERSION,
139 ext_modules = ext_modules
140 )
141
142 if __name__ == '__main__':
143 from distribute_setup import use_setuptools
144 use_setuptools()
145 from setuptools import setup
146 install(setup)
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -109,19 +109,12 @@
classifiers=CLASSIFIERS,
description=DOCLINES[0],
download_url="http://www.sunpy.org/download/",
- # 2011/11/21: disabling for now to prevent paver warnings
- #extra_requires={
- # "Plotman": ['PyQt4']
- #},
install_requires=[
'numpy>1.6.0',
'astropy>=0.3.0',
'scipy',
- # 'suds',
'pandas>=0.10.0',
'matplotlib>=1.1',
- 'glymur>=0.5.9',
- # 'beautifulsoup4',
'sqlalchemy',
],
license="BSD",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -109,19 +109,12 @@\n classifiers=CLASSIFIERS,\n description=DOCLINES[0],\n download_url=\"http://www.sunpy.org/download/\",\n- # 2011/11/21: disabling for now to prevent paver warnings\n- #extra_requires={\n- # \"Plotman\": ['PyQt4']\n- #},\n install_requires=[\n 'numpy>1.6.0',\n 'astropy>=0.3.0',\n 'scipy',\n- # 'suds',\n 'pandas>=0.10.0',\n 'matplotlib>=1.1',\n- 'glymur>=0.5.9',\n- # 'beautifulsoup4',\n 'sqlalchemy',\n ],\n license=\"BSD\",\n", "issue": "Make Glymur an optional dependancy\nThis is \"make io imports optional\"\n\n", "before_files": [{"content": "\"\"\"\nSunPy: Python for Solar Physics\n\nThe SunPy project is an effort to create an open-source software library for\nsolar physics using the Python programming language.\n\"\"\"\nDOCLINES = __doc__.split(\"\\n\")\n\nCLASSIFIERS = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'\n]\n\nVERSION = '0.3.2'\n\ndef git_description():\n import subprocess\n try:\n out = subprocess.Popen(['git', 'describe', '--tags'], stdout = subprocess.PIPE).communicate()[0]\n description = out.strip().decode('ascii')\n except OSError:\n description = 'Error: could not run git'\n return description\n\ndef write_version_py():\n import os\n if os.path.exists('.git'):\n GIT_DESCRIPTION = git_description()\n else:\n GIT_DESCRIPTION = 'N/A'\n\n out = open('sunpy/version.py', 'w')\n template = \"\"\"# This file is automatically generated by SunPy's setup.py\nversion = '%(version)s'\ngit_description = '%(git_description)s'\n\"\"\"\n try:\n out.write(template % {'version': VERSION,\n 'git_description': GIT_DESCRIPTION})\n finally:\n out.close()\n\n\ndef install(setup): #pylint: disable=W0621\n from setuptools import find_packages\n #Crotate Module\n from distutils.core import Extension\n from os.path import dirname, join\n cwd = dirname(__file__)\n try:\n import numpy as np\n except ImportError:\n print(\"SunPy WARNING: NumPy must be installed first to build the C extension\")\n\n if 'np' in locals():\n module = 'sunpy.image.Crotate' # import this\n sourcefiles = [join(cwd, 'sunpy', 'image', 'src', 'rot_extn.c'),\n join(cwd, 'sunpy', 'image', 'src', 'transform', 'aff_tr.c')]\n libs = ['m']\n # -ON for compile optimise\n gcc_args = ['-std=c99', '-O3']\n # gcc_args = ['-std=c99']\n\n # need *module* name here\n crotate = Extension(module,\n sources = sourcefiles,\n libraries = libs,\n extra_compile_args = gcc_args,\n include_dirs =\n [np.get_include(), join(cwd, 'sunpy', 'image', 'src')]\n )\n\n module_ana = 'sunpy.io._pyana'\n sourcefiles_ana = [join(cwd, 'sunpy', 'io', 'src', 'ana', 'anacompress.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'anadecompress.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'anarw.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'testrw.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', '_pyana.c')]\n\n ana = Extension(module_ana,\n sources = sourcefiles_ana,\n libraries = libs,\n extra_compile_args = gcc_args,\n include_dirs =\n [np.get_include(), join(cwd, 'sunpy', 'io', 'src')]\n )\n ext_modules = []\n if 
'crotate' in locals():\n ext_modules.append(crotate)\n if 'ana' in locals():\n ext_modules.append(ana)\n\n write_version_py()\n\n setup(\n\tauthor=\"Steven Christe, Matt Earnshaw, Russell Hewett, Keith Hughitt, Jack Ireland, Florian Mayer, Stuart Mumford, Albert Shih, David Perez-Suarez et. al\",\n author_email=\"[email protected]\",\n classifiers=CLASSIFIERS,\n description=DOCLINES[0],\n download_url=\"http://www.sunpy.org/download/\",\n # 2011/11/21: disabling for now to prevent paver warnings\n #extra_requires={\n # \"Plotman\": ['PyQt4']\n #},\n install_requires=[\n 'numpy>1.6.0',\n 'astropy>=0.3.0',\n 'scipy',\n # 'suds',\n 'pandas>=0.10.0',\n 'matplotlib>=1.1',\n 'glymur>=0.5.9',\n # 'beautifulsoup4',\n 'sqlalchemy',\n ],\n license=\"BSD\",\n long_description=\"\\n\".join(DOCLINES[2:]),\n maintainer=\"SunPy Developers\",\n maintainer_email=\"[email protected]\",\n name=\"sunpy\",\n packages=find_packages(),\n package_data={'': ['*.fits', '*.fit', 'sunpyrc']},\n platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"],\n provides=['sunpy'],\n url=\"http://www.sunpy.org/\",\n use_2to3=True,\n version=VERSION,\n ext_modules = ext_modules\n )\n\nif __name__ == '__main__':\n from distribute_setup import use_setuptools\n use_setuptools()\n from setuptools import setup\n install(setup)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\nSunPy: Python for Solar Physics\n\nThe SunPy project is an effort to create an open-source software library for\nsolar physics using the Python programming language.\n\"\"\"\nDOCLINES = __doc__.split(\"\\n\")\n\nCLASSIFIERS = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'\n]\n\nVERSION = '0.3.2'\n\ndef git_description():\n import subprocess\n try:\n out = subprocess.Popen(['git', 'describe', '--tags'], stdout = subprocess.PIPE).communicate()[0]\n description = out.strip().decode('ascii')\n except OSError:\n description = 'Error: could not run git'\n return description\n\ndef write_version_py():\n import os\n if os.path.exists('.git'):\n GIT_DESCRIPTION = git_description()\n else:\n GIT_DESCRIPTION = 'N/A'\n\n out = open('sunpy/version.py', 'w')\n template = \"\"\"# This file is automatically generated by SunPy's setup.py\nversion = '%(version)s'\ngit_description = '%(git_description)s'\n\"\"\"\n try:\n out.write(template % {'version': VERSION,\n 'git_description': GIT_DESCRIPTION})\n finally:\n out.close()\n\n\ndef install(setup): #pylint: disable=W0621\n from setuptools import find_packages\n #Crotate Module\n from distutils.core import Extension\n from os.path import dirname, join\n cwd = dirname(__file__)\n try:\n import numpy as np\n except ImportError:\n print(\"SunPy WARNING: NumPy must be installed first to build the C extension\")\n\n if 'np' in locals():\n module = 'sunpy.image.Crotate' # import this\n sourcefiles = [join(cwd, 'sunpy', 'image', 'src', 'rot_extn.c'),\n join(cwd, 'sunpy', 'image', 'src', 'transform', 'aff_tr.c')]\n libs = ['m']\n # -ON for compile optimise\n gcc_args = ['-std=c99', '-O3']\n # gcc_args = ['-std=c99']\n\n # need *module* name here\n crotate = 
Extension(module,\n sources = sourcefiles,\n libraries = libs,\n extra_compile_args = gcc_args,\n include_dirs =\n [np.get_include(), join(cwd, 'sunpy', 'image', 'src')]\n )\n\n module_ana = 'sunpy.io._pyana'\n sourcefiles_ana = [join(cwd, 'sunpy', 'io', 'src', 'ana', 'anacompress.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'anadecompress.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'anarw.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'testrw.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', '_pyana.c')]\n\n ana = Extension(module_ana,\n sources = sourcefiles_ana,\n libraries = libs,\n extra_compile_args = gcc_args,\n include_dirs =\n [np.get_include(), join(cwd, 'sunpy', 'io', 'src')]\n )\n ext_modules = []\n if 'crotate' in locals():\n ext_modules.append(crotate)\n if 'ana' in locals():\n ext_modules.append(ana)\n\n write_version_py()\n\n setup(\n\tauthor=\"Steven Christe, Matt Earnshaw, Russell Hewett, Keith Hughitt, Jack Ireland, Florian Mayer, Stuart Mumford, Albert Shih, David Perez-Suarez et. al\",\n author_email=\"[email protected]\",\n classifiers=CLASSIFIERS,\n description=DOCLINES[0],\n download_url=\"http://www.sunpy.org/download/\",\n install_requires=[\n 'numpy>1.6.0',\n 'astropy>=0.3.0',\n 'scipy',\n 'pandas>=0.10.0',\n 'matplotlib>=1.1',\n 'sqlalchemy',\n ],\n license=\"BSD\",\n long_description=\"\\n\".join(DOCLINES[2:]),\n maintainer=\"SunPy Developers\",\n maintainer_email=\"[email protected]\",\n name=\"sunpy\",\n packages=find_packages(),\n package_data={'': ['*.fits', '*.fit', 'sunpyrc']},\n platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"],\n provides=['sunpy'],\n url=\"http://www.sunpy.org/\",\n use_2to3=True,\n version=VERSION,\n ext_modules = ext_modules\n )\n\nif __name__ == '__main__':\n from distribute_setup import use_setuptools\n use_setuptools()\n from setuptools import setup\n install(setup)\n", "path": "setup.py"}]} | 1,829 | 200 |
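Editor's note: the golden diff in this row only removes `glymur` from `install_requires`; the usual counterpart in consuming code is a lazy import that fails with an actionable message when the optional feature is actually used. The sketch below is a generic illustration of that pattern under those assumptions — `open_jp2` is a hypothetical helper, not sunpy's actual implementation.

```python
# Generic optional-dependency pattern implied by making glymur optional:
# import lazily inside the code path that needs it, and raise a clear error
# only when JPEG 2000 support is actually requested.
def open_jp2(path):
    try:
        import glymur
    except ImportError as exc:
        raise ImportError(
            "JPEG 2000 support requires the optional 'glymur' package "
            "(pip install glymur)"
        ) from exc
    return glymur.Jp2k(path)
```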
gh_patches_debug_29755 | rasdani/github-patches | git_diff | joke2k__faker-1036 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Edit on Github" link broken in ReadTheDocs
http://fake-factory.readthedocs.org/en/latest/locales.html
Clicking "Edit on Github" results in a 404 error.
EDIT:
http://fake-factory.readthedocs.org/en/latest/ has a github link to `https://github.com/joke2k/faker/blob/docs/docs/index.rst` when the correct link is
`https://github.com/joke2k/faker/blob/master/docs/index.rst`
(Note the doubled up `docs/docs` instead of `master/docs`)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/build_docs.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import print_function, unicode_literals
4
5 import os
6 import pprint
7 import sys
8
9 import six
10
11 DOCS_ROOT = os.path.abspath(os.path.join('..', 'docs'))
12
13
14 def write(fh, s):
15 return fh.write(s.encode('utf-8'))
16
17
18 def write_provider(fh, doc, provider, formatters, excludes=None):
19
20 if excludes is None:
21 excludes = []
22
23 write(fh, '\n')
24 title = "``{0}``".format(doc.get_provider_name(provider))
25 write(fh, '%s\n' % title)
26 write(fh, "-" * len(title))
27 write(fh, '\n\n::\n')
28
29 for signature, example in formatters.items():
30 if signature in excludes:
31 continue
32 try:
33 # `pprint` can't format sets of heterogenous types.
34 if not isinstance(example, set):
35 example = pprint.pformat(example, indent=4)
36 lines = six.text_type(example).expandtabs().splitlines()
37 except UnicodeEncodeError:
38 msg = 'error on "{0}" with value "{1}"'.format(signature, example)
39 raise Exception(msg)
40 write(fh, '\n')
41 write(fh, "\t{fake}\n{example}\n".format(
42 fake=signature,
43 example='\n'.join(['\t# ' + line for line in lines]),
44 ))
45
46
47 def write_docs(*args, **kwargs):
48 from faker import Faker, documentor
49 from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES
50
51 fake = Faker(locale=DEFAULT_LOCALE)
52
53 from faker.providers import BaseProvider
54 base_provider_formatters = [f for f in dir(BaseProvider)]
55
56 doc = documentor.Documentor(fake)
57
58 formatters = doc.get_formatters(with_args=True, with_defaults=True)
59
60 for provider, fakers in formatters:
61 provider_name = doc.get_provider_name(provider)
62 fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)
63 with open(fname, 'wb') as fh:
64 write_provider(fh, doc, provider, fakers)
65
66 with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:
67 write(fh, 'Providers\n')
68 write(fh, '=========\n')
69 write(fh, '.. toctree::\n')
70 write(fh, ' :maxdepth: 2\n\n')
71 [write(fh, ' providers/%s\n' % doc.get_provider_name(provider))
72 for provider, fakers in formatters]
73
74 AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES)
75 for lang in AVAILABLE_LOCALES:
76 fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)
77 with open(fname, 'wb') as fh:
78 write(fh, '\n')
79 title = 'Language {0}\n'.format(lang)
80 write(fh, title)
81 write(fh, '=' * len(title))
82 write(fh, '\n')
83 fake = Faker(locale=lang)
84 d = documentor.Documentor(fake)
85
86 for p, fs in d.get_formatters(with_args=True, with_defaults=True,
87 locale=lang,
88 excludes=base_provider_formatters):
89 write_provider(fh, d, p, fs)
90
91 with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:
92 write(fh, 'Locales\n')
93 write(fh, '=======\n')
94 write(fh, '.. toctree::\n')
95 write(fh, ' :maxdepth: 2\n\n')
96 [write(fh, ' locales/%s\n' % lang) for lang in AVAILABLE_LOCALES]
97
98
99 # wrappers for sphinx
100 def _main(app, *args, **kwargs):
101 return write_docs(*args, **kwargs)
102
103
104 def setup(app):
105 app.connect(str('builder-inited'), _main)
106
107
108 if __name__ == "__main__":
109 write_docs(*sys.argv[1:])
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/build_docs.py b/faker/build_docs.py
--- a/faker/build_docs.py
+++ b/faker/build_docs.py
@@ -61,9 +61,11 @@
provider_name = doc.get_provider_name(provider)
fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)
with open(fname, 'wb') as fh:
+ write(fh, ':github_url: hide\n\n')
write_provider(fh, doc, provider, fakers)
with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:
+ write(fh, ':github_url: hide\n\n')
write(fh, 'Providers\n')
write(fh, '=========\n')
write(fh, '.. toctree::\n')
@@ -75,7 +77,7 @@
for lang in AVAILABLE_LOCALES:
fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)
with open(fname, 'wb') as fh:
- write(fh, '\n')
+ write(fh, ':github_url: hide\n\n')
title = 'Language {0}\n'.format(lang)
write(fh, title)
write(fh, '=' * len(title))
@@ -89,6 +91,7 @@
write_provider(fh, d, p, fs)
with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:
+ write(fh, ':github_url: hide\n\n')
write(fh, 'Locales\n')
write(fh, '=======\n')
write(fh, '.. toctree::\n')
| {"golden_diff": "diff --git a/faker/build_docs.py b/faker/build_docs.py\n--- a/faker/build_docs.py\n+++ b/faker/build_docs.py\n@@ -61,9 +61,11 @@\n provider_name = doc.get_provider_name(provider)\n fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)\n with open(fname, 'wb') as fh:\n+ write(fh, ':github_url: hide\\n\\n')\n write_provider(fh, doc, provider, fakers)\n \n with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:\n+ write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Providers\\n')\n write(fh, '=========\\n')\n write(fh, '.. toctree::\\n')\n@@ -75,7 +77,7 @@\n for lang in AVAILABLE_LOCALES:\n fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)\n with open(fname, 'wb') as fh:\n- write(fh, '\\n')\n+ write(fh, ':github_url: hide\\n\\n')\n title = 'Language {0}\\n'.format(lang)\n write(fh, title)\n write(fh, '=' * len(title))\n@@ -89,6 +91,7 @@\n write_provider(fh, d, p, fs)\n \n with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:\n+ write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Locales\\n')\n write(fh, '=======\\n')\n write(fh, '.. toctree::\\n')\n", "issue": "\"Edit on Github\" link broken in ReadTheDocs\nhttp://fake-factory.readthedocs.org/en/latest/locales.html\n\nClicking \"Edit on Github\" results in a 404 error.\n\nEDIT: \nhttp://fake-factory.readthedocs.org/en/latest/ has a github link to `https://github.com/joke2k/faker/blob/docs/docs/index.rst` when the correct link is\n`https://github.com/joke2k/faker/blob/master/docs/index.rst`\n(Note the doubled up `docs/docs` instead of `master/docs`)\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport pprint\nimport sys\n\nimport six\n\nDOCS_ROOT = os.path.abspath(os.path.join('..', 'docs'))\n\n\ndef write(fh, s):\n return fh.write(s.encode('utf-8'))\n\n\ndef write_provider(fh, doc, provider, formatters, excludes=None):\n\n if excludes is None:\n excludes = []\n\n write(fh, '\\n')\n title = \"``{0}``\".format(doc.get_provider_name(provider))\n write(fh, '%s\\n' % title)\n write(fh, \"-\" * len(title))\n write(fh, '\\n\\n::\\n')\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n # `pprint` can't format sets of heterogenous types.\n if not isinstance(example, set):\n example = pprint.pformat(example, indent=4)\n lines = six.text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n msg = 'error on \"{0}\" with value \"{1}\"'.format(signature, example)\n raise Exception(msg)\n write(fh, '\\n')\n write(fh, \"\\t{fake}\\n{example}\\n\".format(\n fake=signature,\n example='\\n'.join(['\\t# ' + line for line in lines]),\n ))\n\n\ndef write_docs(*args, **kwargs):\n from faker import Faker, documentor\n from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES\n\n fake = Faker(locale=DEFAULT_LOCALE)\n\n from faker.providers import BaseProvider\n base_provider_formatters = [f for f in dir(BaseProvider)]\n\n doc = documentor.Documentor(fake)\n\n formatters = doc.get_formatters(with_args=True, with_defaults=True)\n\n for provider, fakers in formatters:\n provider_name = doc.get_provider_name(provider)\n fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)\n with open(fname, 'wb') as fh:\n write_provider(fh, doc, provider, fakers)\n\n with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:\n write(fh, 'Providers\\n')\n write(fh, '=========\\n')\n write(fh, '.. 
toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' providers/%s\\n' % doc.get_provider_name(provider))\n for provider, fakers in formatters]\n\n AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES)\n for lang in AVAILABLE_LOCALES:\n fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)\n with open(fname, 'wb') as fh:\n write(fh, '\\n')\n title = 'Language {0}\\n'.format(lang)\n write(fh, title)\n write(fh, '=' * len(title))\n write(fh, '\\n')\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True,\n locale=lang,\n excludes=base_provider_formatters):\n write_provider(fh, d, p, fs)\n\n with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:\n write(fh, 'Locales\\n')\n write(fh, '=======\\n')\n write(fh, '.. toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' locales/%s\\n' % lang) for lang in AVAILABLE_LOCALES]\n\n\n# wrappers for sphinx\ndef _main(app, *args, **kwargs):\n return write_docs(*args, **kwargs)\n\n\ndef setup(app):\n app.connect(str('builder-inited'), _main)\n\n\nif __name__ == \"__main__\":\n write_docs(*sys.argv[1:])\n", "path": "faker/build_docs.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport pprint\nimport sys\n\nimport six\n\nDOCS_ROOT = os.path.abspath(os.path.join('..', 'docs'))\n\n\ndef write(fh, s):\n return fh.write(s.encode('utf-8'))\n\n\ndef write_provider(fh, doc, provider, formatters, excludes=None):\n\n if excludes is None:\n excludes = []\n\n write(fh, '\\n')\n title = \"``{0}``\".format(doc.get_provider_name(provider))\n write(fh, '%s\\n' % title)\n write(fh, \"-\" * len(title))\n write(fh, '\\n\\n::\\n')\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n # `pprint` can't format sets of heterogenous types.\n if not isinstance(example, set):\n example = pprint.pformat(example, indent=4)\n lines = six.text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n msg = 'error on \"{0}\" with value \"{1}\"'.format(signature, example)\n raise Exception(msg)\n write(fh, '\\n')\n write(fh, \"\\t{fake}\\n{example}\\n\".format(\n fake=signature,\n example='\\n'.join(['\\t# ' + line for line in lines]),\n ))\n\n\ndef write_docs(*args, **kwargs):\n from faker import Faker, documentor\n from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES\n\n fake = Faker(locale=DEFAULT_LOCALE)\n\n from faker.providers import BaseProvider\n base_provider_formatters = [f for f in dir(BaseProvider)]\n\n doc = documentor.Documentor(fake)\n\n formatters = doc.get_formatters(with_args=True, with_defaults=True)\n\n for provider, fakers in formatters:\n provider_name = doc.get_provider_name(provider)\n fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)\n with open(fname, 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write_provider(fh, doc, provider, fakers)\n\n with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Providers\\n')\n write(fh, '=========\\n')\n write(fh, '.. 
toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' providers/%s\\n' % doc.get_provider_name(provider))\n for provider, fakers in formatters]\n\n AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES)\n for lang in AVAILABLE_LOCALES:\n fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)\n with open(fname, 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n title = 'Language {0}\\n'.format(lang)\n write(fh, title)\n write(fh, '=' * len(title))\n write(fh, '\\n')\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True,\n locale=lang,\n excludes=base_provider_formatters):\n write_provider(fh, d, p, fs)\n\n with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Locales\\n')\n write(fh, '=======\\n')\n write(fh, '.. toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' locales/%s\\n' % lang) for lang in AVAILABLE_LOCALES]\n\n\n# wrappers for sphinx\ndef _main(app, *args, **kwargs):\n return write_docs(*args, **kwargs)\n\n\ndef setup(app):\n app.connect(str('builder-inited'), _main)\n\n\nif __name__ == \"__main__\":\n write_docs(*sys.argv[1:])\n", "path": "faker/build_docs.py"}]} | 1,479 | 371 |
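Editor's note: the fix in this row prepends `:github_url: hide` to every auto-generated page so the theme stops emitting an "Edit on GitHub" link for files that exist only in the build. The sketch below shows that pattern in isolation; `write_generated_page` and its arguments are hypothetical, and only the written header bytes mirror the golden diff.

```python
# Minimal sketch of the ':github_url: hide' pattern applied in the golden diff:
# each generated .rst starts with the field so the broken edit link is hidden
# for pages that have no counterpart in the repository.
import os

def write_generated_page(docs_root, relpath, body):
    fname = os.path.join(docs_root, relpath)
    with open(fname, "wb") as fh:
        fh.write(b":github_url: hide\n\n")
        fh.write(body.encode("utf-8"))
```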
gh_patches_debug_8945 | rasdani/github-patches | git_diff | open-mmlab__mmaction2-624 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError: FastRCNN is not registered in LOCALIZERS, RECOGNIZERS or DETECTORS
Hello
when I train on the AVA dataset I get the error
ValueError: FastRCNN is not registered in LOCALIZERS, RECOGNIZERS or DETECTORS
How can I solve it?
Thank you very much
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmaction/models/builder.py`
Content:
```
1 import torch.nn as nn
2 from mmcv.utils import Registry, build_from_cfg
3
4 from mmaction.utils import import_module_error_func
5 from .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, NECKS, RECOGNIZERS
6
7 try:
8 from mmdet.models.builder import DETECTORS, build_detector
9 except (ImportError, ModuleNotFoundError):
10 # Define an empty registry and building func, so that can import
11 DETECTORS = Registry('detector')
12
13 @import_module_error_func('mmdet')
14 def build_detector(cfg, train_cfg, test_cfg):
15 pass
16
17
18 def build(cfg, registry, default_args=None):
19 """Build a module.
20
21 Args:
22 cfg (dict, list[dict]): The config of modules, it is either a dict
23 or a list of configs.
24 registry (:obj:`Registry`): A registry the module belongs to.
25 default_args (dict, optional): Default arguments to build the module.
26 Defaults to None.
27
28 Returns:
29 nn.Module: A built nn module.
30 """
31
32 if isinstance(cfg, list):
33 modules = [
34 build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
35 ]
36 return nn.Sequential(*modules)
37
38 return build_from_cfg(cfg, registry, default_args)
39
40
41 def build_backbone(cfg):
42 """Build backbone."""
43 return build(cfg, BACKBONES)
44
45
46 def build_head(cfg):
47 """Build head."""
48 return build(cfg, HEADS)
49
50
51 def build_recognizer(cfg, train_cfg=None, test_cfg=None):
52 """Build recognizer."""
53 return build(cfg, RECOGNIZERS,
54 dict(train_cfg=train_cfg, test_cfg=test_cfg))
55
56
57 def build_loss(cfg):
58 """Build loss."""
59 return build(cfg, LOSSES)
60
61
62 def build_localizer(cfg):
63 """Build localizer."""
64 return build(cfg, LOCALIZERS)
65
66
67 def build_model(cfg, train_cfg=None, test_cfg=None):
68 """Build model."""
69 args = cfg.copy()
70 obj_type = args.pop('type')
71 if obj_type in LOCALIZERS:
72 return build_localizer(cfg)
73 if obj_type in RECOGNIZERS:
74 return build_recognizer(cfg, train_cfg, test_cfg)
75 if obj_type in DETECTORS:
76 return build_detector(cfg, train_cfg, test_cfg)
77 raise ValueError(f'{obj_type} is not registered in '
78 'LOCALIZERS, RECOGNIZERS or DETECTORS')
79
80
81 def build_neck(cfg):
82 """Build neck."""
83 return build(cfg, NECKS)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmaction/models/builder.py b/mmaction/models/builder.py
--- a/mmaction/models/builder.py
+++ b/mmaction/models/builder.py
@@ -74,6 +74,10 @@
return build_recognizer(cfg, train_cfg, test_cfg)
if obj_type in DETECTORS:
return build_detector(cfg, train_cfg, test_cfg)
+ model_in_mmdet = ['FastRCNN']
+ if obj_type in model_in_mmdet:
+ raise ImportError(
+ 'Please install mmdet for spatial temporal detection tasks.')
raise ValueError(f'{obj_type} is not registered in '
'LOCALIZERS, RECOGNIZERS or DETECTORS')
| {"golden_diff": "diff --git a/mmaction/models/builder.py b/mmaction/models/builder.py\n--- a/mmaction/models/builder.py\n+++ b/mmaction/models/builder.py\n@@ -74,6 +74,10 @@\n return build_recognizer(cfg, train_cfg, test_cfg)\n if obj_type in DETECTORS:\n return build_detector(cfg, train_cfg, test_cfg)\n+ model_in_mmdet = ['FastRCNN']\n+ if obj_type in model_in_mmdet:\n+ raise ImportError(\n+ 'Please install mmdet for spatial temporal detection tasks.')\n raise ValueError(f'{obj_type} is not registered in '\n 'LOCALIZERS, RECOGNIZERS or DETECTORS')\n", "issue": "ValueError: FastRCNN is not registered in LOCALIZERS, RECOGNIZERS or DETECTORS\nHello\r\nwhen I train AVA dataset find a error\r\nValueError: FastRCNN is not registered in LOCALIZERS, RECOGNIZERS or DETECTORS\r\nhow to solve it \r\nthink you very much \n", "before_files": [{"content": "import torch.nn as nn\nfrom mmcv.utils import Registry, build_from_cfg\n\nfrom mmaction.utils import import_module_error_func\nfrom .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, NECKS, RECOGNIZERS\n\ntry:\n from mmdet.models.builder import DETECTORS, build_detector\nexcept (ImportError, ModuleNotFoundError):\n # Define an empty registry and building func, so that can import\n DETECTORS = Registry('detector')\n\n @import_module_error_func('mmdet')\n def build_detector(cfg, train_cfg, test_cfg):\n pass\n\n\ndef build(cfg, registry, default_args=None):\n \"\"\"Build a module.\n\n Args:\n cfg (dict, list[dict]): The config of modules, it is either a dict\n or a list of configs.\n registry (:obj:`Registry`): A registry the module belongs to.\n default_args (dict, optional): Default arguments to build the module.\n Defaults to None.\n\n Returns:\n nn.Module: A built nn module.\n \"\"\"\n\n if isinstance(cfg, list):\n modules = [\n build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg\n ]\n return nn.Sequential(*modules)\n\n return build_from_cfg(cfg, registry, default_args)\n\n\ndef build_backbone(cfg):\n \"\"\"Build backbone.\"\"\"\n return build(cfg, BACKBONES)\n\n\ndef build_head(cfg):\n \"\"\"Build head.\"\"\"\n return build(cfg, HEADS)\n\n\ndef build_recognizer(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build recognizer.\"\"\"\n return build(cfg, RECOGNIZERS,\n dict(train_cfg=train_cfg, test_cfg=test_cfg))\n\n\ndef build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n return build(cfg, LOSSES)\n\n\ndef build_localizer(cfg):\n \"\"\"Build localizer.\"\"\"\n return build(cfg, LOCALIZERS)\n\n\ndef build_model(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build model.\"\"\"\n args = cfg.copy()\n obj_type = args.pop('type')\n if obj_type in LOCALIZERS:\n return build_localizer(cfg)\n if obj_type in RECOGNIZERS:\n return build_recognizer(cfg, train_cfg, test_cfg)\n if obj_type in DETECTORS:\n return build_detector(cfg, train_cfg, test_cfg)\n raise ValueError(f'{obj_type} is not registered in '\n 'LOCALIZERS, RECOGNIZERS or DETECTORS')\n\n\ndef build_neck(cfg):\n \"\"\"Build neck.\"\"\"\n return build(cfg, NECKS)\n", "path": "mmaction/models/builder.py"}], "after_files": [{"content": "import torch.nn as nn\nfrom mmcv.utils import Registry, build_from_cfg\n\nfrom mmaction.utils import import_module_error_func\nfrom .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, NECKS, RECOGNIZERS\n\ntry:\n from mmdet.models.builder import DETECTORS, build_detector\nexcept (ImportError, ModuleNotFoundError):\n # Define an empty registry and building func, so that can import\n DETECTORS = Registry('detector')\n\n @import_module_error_func('mmdet')\n def 
build_detector(cfg, train_cfg, test_cfg):\n pass\n\n\ndef build(cfg, registry, default_args=None):\n \"\"\"Build a module.\n\n Args:\n cfg (dict, list[dict]): The config of modules, it is either a dict\n or a list of configs.\n registry (:obj:`Registry`): A registry the module belongs to.\n default_args (dict, optional): Default arguments to build the module.\n Defaults to None.\n\n Returns:\n nn.Module: A built nn module.\n \"\"\"\n\n if isinstance(cfg, list):\n modules = [\n build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg\n ]\n return nn.Sequential(*modules)\n\n return build_from_cfg(cfg, registry, default_args)\n\n\ndef build_backbone(cfg):\n \"\"\"Build backbone.\"\"\"\n return build(cfg, BACKBONES)\n\n\ndef build_head(cfg):\n \"\"\"Build head.\"\"\"\n return build(cfg, HEADS)\n\n\ndef build_recognizer(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build recognizer.\"\"\"\n return build(cfg, RECOGNIZERS,\n dict(train_cfg=train_cfg, test_cfg=test_cfg))\n\n\ndef build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n return build(cfg, LOSSES)\n\n\ndef build_localizer(cfg):\n \"\"\"Build localizer.\"\"\"\n return build(cfg, LOCALIZERS)\n\n\ndef build_model(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build model.\"\"\"\n args = cfg.copy()\n obj_type = args.pop('type')\n if obj_type in LOCALIZERS:\n return build_localizer(cfg)\n if obj_type in RECOGNIZERS:\n return build_recognizer(cfg, train_cfg, test_cfg)\n if obj_type in DETECTORS:\n return build_detector(cfg, train_cfg, test_cfg)\n model_in_mmdet = ['FastRCNN']\n if obj_type in model_in_mmdet:\n raise ImportError(\n 'Please install mmdet for spatial temporal detection tasks.')\n raise ValueError(f'{obj_type} is not registered in '\n 'LOCALIZERS, RECOGNIZERS or DETECTORS')\n\n\ndef build_neck(cfg):\n \"\"\"Build neck.\"\"\"\n return build(cfg, NECKS)\n", "path": "mmaction/models/builder.py"}]} | 1,050 | 157 |
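Editor's note: the patch in this row turns a misleading `ValueError` into an actionable `ImportError` when a model type that lives in mmdet (e.g. `FastRCNN`) is requested but mmdet is not installed. The sketch below is a simplified, registry-free restatement of that dispatch order; the container arguments are stand-ins for the real mmcv registries, not the actual API.

```python
# Simplified restatement of the patched build_model dispatch: known mmdet-only
# model types get an ImportError with install guidance instead of the generic
# "not registered" ValueError.
MODELS_IN_MMDET = {"FastRCNN"}

def resolve_model_kind(obj_type, localizers, recognizers, detectors):
    if obj_type in localizers:
        return "localizer"
    if obj_type in recognizers:
        return "recognizer"
    if obj_type in detectors:
        return "detector"
    if obj_type in MODELS_IN_MMDET:
        raise ImportError(
            "Please install mmdet for spatial temporal detection tasks."
        )
    raise ValueError(
        f"{obj_type} is not registered in LOCALIZERS, RECOGNIZERS or DETECTORS"
    )
```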
gh_patches_debug_21034 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-3339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing package named `packaging`
### Describe the bug
#3267 introduced a new python dependency named `packaging` (https://pypi.org/project/packaging/). We need to add it to the `install_requires` list in `core/setup.py`.
### Steps To Reproduce
This issue only affects 0.20.0b1 at the moment. Install dbt v0.20.0b1 in a bare virtualenv and run a command that imports the missing `packaging` package (`dbt --version`)
```bash
$ dbt --version
Traceback (most recent call last):
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/bin/dbt", line 5, in <module>
from dbt.main import main
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/main.py", line 17, in <module>
import dbt.task.deps as deps_task
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/task/deps.py", line 9, in <module>
from dbt.deps.resolver import resolve_packages
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/deps/resolver.py", line 11, in <module>
from dbt.deps.git import GitUnpinnedPackage
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/deps/git.py", line 5, in <module>
from dbt.clients import git, system
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/clients/git.py", line 7, in <module>
from packaging import version
ModuleNotFoundError: No module named 'packaging'
```
### Additional context
This wasn't caught during testing since `packaging` is installed because it is an upstream dependency of our testing tools (`tox` and `pytest`). Our current testing setup is not configured to catch this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 def read(fname):
23 return open(os.path.join(os.path.dirname(__file__), fname)).read()
24
25
26 package_name = "dbt-core"
27 package_version = "0.20.0b1"
28 description = """dbt (data build tool) is a command line tool that helps \
29 analysts and engineers transform data in their warehouse more effectively"""
30
31
32 setup(
33 name=package_name,
34 version=package_version,
35 description=description,
36 long_description=description,
37 author="Fishtown Analytics",
38 author_email="[email protected]",
39 url="https://github.com/fishtown-analytics/dbt",
40 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
41 package_data={
42 'dbt': [
43 'include/index.html',
44 'include/global_project/dbt_project.yml',
45 'include/global_project/docs/*.md',
46 'include/global_project/macros/*.sql',
47 'include/global_project/macros/**/*.sql',
48 'include/global_project/macros/**/**/*.sql',
49 'py.typed',
50 ]
51 },
52 test_suite='test',
53 entry_points={
54 'console_scripts': [
55 'dbt = dbt.main:main',
56 ],
57 },
58 scripts=[
59 'scripts/dbt',
60 ],
61 install_requires=[
62 'Jinja2==2.11.2',
63 'PyYAML>=3.11',
64 'sqlparse>=0.2.3,<0.4',
65 'networkx>=2.3,<3',
66 'minimal-snowplow-tracker==0.0.2',
67 'colorama>=0.3.9,<0.4.5',
68 'agate>=1.6,<1.6.2',
69 'isodate>=0.6,<0.7',
70 'json-rpc>=1.12,<2',
71 'werkzeug>=0.15,<2.0',
72 'dataclasses>=0.6,<0.9;python_version<"3.7"',
73 'hologram==0.0.14',
74 'logbook>=1.5,<1.6',
75 'mashumaro==2.0',
76 'typing-extensions>=3.7.4,<3.8',
77 # the following are all to match snowflake-connector-python
78 'requests<3.0.0',
79 'idna>=2.5,<3',
80 'cffi>=1.9,<2.0.0',
81 ],
82 zip_safe=False,
83 classifiers=[
84 'Development Status :: 5 - Production/Stable',
85
86 'License :: OSI Approved :: Apache Software License',
87
88 'Operating System :: Microsoft :: Windows',
89 'Operating System :: MacOS :: MacOS X',
90 'Operating System :: POSIX :: Linux',
91
92 'Programming Language :: Python :: 3.6',
93 'Programming Language :: Python :: 3.7',
94 'Programming Language :: Python :: 3.8',
95 'Programming Language :: Python :: 3.9',
96 ],
97 python_requires=">=3.6.3",
98 )
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -61,19 +61,20 @@
install_requires=[
'Jinja2==2.11.2',
'PyYAML>=3.11',
- 'sqlparse>=0.2.3,<0.4',
- 'networkx>=2.3,<3',
- 'minimal-snowplow-tracker==0.0.2',
- 'colorama>=0.3.9,<0.4.5',
'agate>=1.6,<1.6.2',
- 'isodate>=0.6,<0.7',
- 'json-rpc>=1.12,<2',
- 'werkzeug>=0.15,<2.0',
+ 'colorama>=0.3.9,<0.4.5',
'dataclasses>=0.6,<0.9;python_version<"3.7"',
'hologram==0.0.14',
+ 'isodate>=0.6,<0.7',
+ 'json-rpc>=1.12,<2',
'logbook>=1.5,<1.6',
'mashumaro==2.0',
+ 'minimal-snowplow-tracker==0.0.2',
+ 'networkx>=2.3,<3',
+ 'packaging~=20.9',
+ 'sqlparse>=0.2.3,<0.4',
'typing-extensions>=3.7.4,<3.8',
+ 'werkzeug>=0.15,<2.0',
# the following are all to match snowflake-connector-python
'requests<3.0.0',
'idna>=2.5,<3',
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -61,19 +61,20 @@\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n- 'sqlparse>=0.2.3,<0.4',\n- 'networkx>=2.3,<3',\n- 'minimal-snowplow-tracker==0.0.2',\n- 'colorama>=0.3.9,<0.4.5',\n 'agate>=1.6,<1.6.2',\n- 'isodate>=0.6,<0.7',\n- 'json-rpc>=1.12,<2',\n- 'werkzeug>=0.15,<2.0',\n+ 'colorama>=0.3.9,<0.4.5',\n 'dataclasses>=0.6,<0.9;python_version<\"3.7\"',\n 'hologram==0.0.14',\n+ 'isodate>=0.6,<0.7',\n+ 'json-rpc>=1.12,<2',\n 'logbook>=1.5,<1.6',\n 'mashumaro==2.0',\n+ 'minimal-snowplow-tracker==0.0.2',\n+ 'networkx>=2.3,<3',\n+ 'packaging~=20.9',\n+ 'sqlparse>=0.2.3,<0.4',\n 'typing-extensions>=3.7.4,<3.8',\n+ 'werkzeug>=0.15,<2.0',\n # the following are all to match snowflake-connector-python\n 'requests<3.0.0',\n 'idna>=2.5,<3',\n", "issue": "Missing package named `packaging`\n### Describe the bug\r\n#3267 introduced a new python dependency named `packaging` (https://pypi.org/project/packaging/). We need to add it to the `install_requires` list in `core/setup.py`.\r\n\r\n### Steps To Reproduce\r\nThis issue only affects 0.20.0b1 at the moment. Install dbt v0.20.0b1 in a bare virtualenv and run a command that utilizes the missing packaging (`dbt --version`)\r\n\r\n```bash\r\n$ dbt --version\r\nTraceback (most recent call last):\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/bin/dbt\", line 5, in <module>\r\n from dbt.main import main\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/main.py\", line 17, in <module>\r\n import dbt.task.deps as deps_task\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/task/deps.py\", line 9, in <module>\r\n from dbt.deps.resolver import resolve_packages\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/deps/resolver.py\", line 11, in <module>\r\n from dbt.deps.git import GitUnpinnedPackage\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/deps/git.py\", line 5, in <module>\r\n from dbt.clients import git, system\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/clients/git.py\", line 7, in <module>\r\n from packaging import version\r\nModuleNotFoundError: No module named 'packaging'\r\n```\r\n\r\n### Additional context\r\nThis wasn't caught during testing since `packaging` is installed because it is an upstream dependency of our testing tools (`tox` and `pytest`). Our current testing setup is not configured to catch this. 
\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.20.0b1\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.4.5',\n 'agate>=1.6,<1.6.2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<2.0',\n 'dataclasses>=0.6,<0.9;python_version<\"3.7\"',\n 'hologram==0.0.14',\n 'logbook>=1.5,<1.6',\n 'mashumaro==2.0',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests<3.0.0',\n 'idna>=2.5,<3',\n 'cffi>=1.9,<2.0.0',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.20.0b1\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more 
effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'agate>=1.6,<1.6.2',\n 'colorama>=0.3.9,<0.4.5',\n 'dataclasses>=0.6,<0.9;python_version<\"3.7\"',\n 'hologram==0.0.14',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'logbook>=1.5,<1.6',\n 'mashumaro==2.0',\n 'minimal-snowplow-tracker==0.0.2',\n 'networkx>=2.3,<3',\n 'packaging~=20.9',\n 'sqlparse>=0.2.3,<0.4',\n 'typing-extensions>=3.7.4,<3.8',\n 'werkzeug>=0.15,<2.0',\n # the following are all to match snowflake-connector-python\n 'requests<3.0.0',\n 'idna>=2.5,<3',\n 'cffi>=1.9,<2.0.0',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}]} | 1,789 | 411 |
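Editor's note: the traceback in this row fails at `from packaging import version` inside `dbt/clients/git.py`, which is why the golden diff adds `packaging~=20.9` to `install_requires`. The snippet below shows the kind of version comparison that import enables; `is_at_least` and the example versions are illustrative, not dbt's actual code.

```python
# Why 'packaging' is a runtime dependency: version strings get parsed and
# compared via packaging.version (e.g. when checking the installed git version).
from packaging import version

def is_at_least(installed: str, minimum: str) -> bool:
    return version.parse(installed) >= version.parse(minimum)

print(is_at_least("2.25.1", "2.22.0"))  # True
```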
gh_patches_debug_415 | rasdani/github-patches | git_diff | freedomofpress__securedrop-6492 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Initial messages containing non-ascii characters fail if codename filtering is enabled.
## Description
Codename filtering was introduced in 2.3.0, allowing admins to block initial submissions containing only the user's codename, as they should not be shared with journalists. The filter uses the `compare_digest()` function to ensure constant-time comparison, but this function will throw a `TypeError` if either of the strings being compared contains non-ASCII characters.
## Steps to Reproduce
- start up `make dev` on 2.4.0
- visit the JI and enable codename filtering under Admin > Instance Config
- visit the SI, create a new source, and submit an initial message containing unicode, ie `Hallo! ö, ü, ä, or ß`
## Expected Behavior
- Message is submitted
## Actual Behavior
- 500 error, and (in dev) stack trace due to TypeError
## Comments
Suggestions to fix, any other relevant information.
--- END ISSUE ---
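Editor's note: a minimal reproduction of the failure described above — `hmac.compare_digest` accepts bytes or ASCII-only `str`, so a message or codename containing non-ASCII characters raises `TypeError`, which is also why the fix encodes both sides as UTF-8 before comparing. The strings below are arbitrary examples.

```python
from hmac import compare_digest

compare_digest("plain ascii codename", "plain ascii codename")          # OK: ASCII-only str
compare_digest("Hallo! ö".encode("utf-8"), "Hallo! ö".encode("utf-8"))  # OK: bytes
try:
    compare_digest("Hallo! ö", "plain ascii codename")                  # non-ASCII str
except TypeError as exc:
    print(exc)  # e.g. "comparing strings with non-ASCII characters is not supported"
```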
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source_app/utils.py`
Content:
```
1 import json
2 import re
3 import subprocess
4 import typing
5 from hmac import compare_digest
6
7 import werkzeug
8 from flask import current_app, flash, redirect, render_template, url_for
9 from flask.sessions import SessionMixin
10 from flask_babel import gettext
11 from markupsafe import Markup, escape
12 from source_user import SourceUser
13 from store import Storage
14
15 if typing.TYPE_CHECKING:
16 from typing import Optional
17
18
19 def codename_detected(message: str, codename: str) -> bool:
20 """
21 Check for codenames in incoming messages. including case where user copy/pasted
22 from /generate or the codename widget on the same page
23 """
24 message = message.strip()
25
26 return compare_digest(message.strip(), codename)
27
28
29 def flash_msg(
30 category: str,
31 declarative: "Optional[str]",
32 *msg_contents: "str",
33 ) -> None:
34 """
35 Render flash message with a (currently) optional declarative heading.
36 """
37 contents = Markup("<br>".join([escape(part) for part in msg_contents]))
38
39 msg = render_template(
40 "flash_message.html",
41 declarative=declarative,
42 msg_contents=contents,
43 )
44 flash(Markup(msg), category)
45
46
47 def clear_session_and_redirect_to_logged_out_page(flask_session: SessionMixin) -> werkzeug.Response:
48 msg = render_template(
49 "flash_message.html",
50 declarative=gettext("Important"),
51 msg_contents=Markup(
52 gettext(
53 'You were logged out due to inactivity. Click the <img src={icon} alt="" '
54 'width="16" height="16"> <b>New Identity</b> button in your Tor Browser\'s '
55 "toolbar before moving on. This will clear your Tor Browser activity data on "
56 "this device."
57 ).format(icon=url_for("static", filename="i/torbroom.png"))
58 ),
59 )
60
61 # Clear the session after we render the message so it's localized
62 flask_session.clear()
63
64 flash(Markup(msg), "error")
65 return redirect(url_for("main.index"))
66
67
68 def normalize_timestamps(logged_in_source: SourceUser) -> None:
69 """
70 Update the timestamps on all of the source's submissions. This
71 minimizes metadata that could be useful to investigators. See
72 #301.
73 """
74 source_in_db = logged_in_source.get_db_record()
75 sub_paths = [
76 Storage.get_default().path(logged_in_source.filesystem_id, submission.filename)
77 for submission in source_in_db.submissions
78 ]
79 if len(sub_paths) > 1:
80 args = ["touch", "--no-create"]
81 args.extend(sub_paths)
82 rc = subprocess.call(args)
83 if rc != 0:
84 current_app.logger.warning(
85 "Couldn't normalize submission " "timestamps (touch exited with %d)" % rc
86 )
87
88
89 def check_url_file(path: str, regexp: str) -> "Optional[str]":
90 """
91 Check that a file exists at the path given and contains a single line
92 matching the regexp. Used for checking the source interface address
93 files in /var/lib/securedrop (as the Apache user can't read Tor config)
94 """
95 try:
96 f = open(path, "r")
97 contents = f.readline().strip()
98 f.close()
99 if re.match(regexp, contents):
100 return contents
101 else:
102 return None
103 except IOError:
104 return None
105
106
107 def get_sourcev3_url() -> "Optional[str]":
108 return check_url_file("/var/lib/securedrop/source_v3_url", r"^[a-z0-9]{56}\.onion$")
109
110
111 def fit_codenames_into_cookie(codenames: dict) -> dict:
112 """
113 If `codenames` will approach `werkzeug.Response.max_cookie_size` once
114 serialized, incrementally pop off the oldest codename until the remaining
115 (newer) ones will fit.
116 """
117
118 serialized = json.dumps(codenames).encode()
119 if len(codenames) > 1 and len(serialized) > 4000: # werkzeug.Response.max_cookie_size = 4093
120 if current_app:
121 current_app.logger.warn(
122 f"Popping oldest of {len(codenames)} "
123 f"codenames ({len(serialized)} bytes) to "
124 f"fit within maximum cookie size"
125 )
126 del codenames[list(codenames)[0]] # FIFO
127
128 return fit_codenames_into_cookie(codenames)
129
130 return codenames
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/source_app/utils.py b/securedrop/source_app/utils.py
--- a/securedrop/source_app/utils.py
+++ b/securedrop/source_app/utils.py
@@ -23,7 +23,7 @@
"""
message = message.strip()
- return compare_digest(message.strip(), codename)
+ return compare_digest(message.strip().encode("utf-8"), codename.encode("utf-8"))
def flash_msg(
| {"golden_diff": "diff --git a/securedrop/source_app/utils.py b/securedrop/source_app/utils.py\n--- a/securedrop/source_app/utils.py\n+++ b/securedrop/source_app/utils.py\n@@ -23,7 +23,7 @@\n \"\"\"\n message = message.strip()\n \n- return compare_digest(message.strip(), codename)\n+ return compare_digest(message.strip().encode(\"utf-8\"), codename.encode(\"utf-8\"))\n \n \n def flash_msg(\n", "issue": "Initial messages containing non-ascii characters fail if codename filtering is enabled.\n## Description\r\n\r\nCodename filtering was introduced in 2.3.0, allowing admins to block initial submissions containing only the user's codename, as they should not be shared with journalists. The filter uses the `compare_digest()` function to ensure constant-time comparison, but this fn will throw a `TypeError` if any of the strings being compared contain Unicode.\r\n\r\n## Steps to Reproduce\r\n\r\n- start up `make dev` on 2.4.0\r\n- visit the JI and enable codename filtering under Admin > Instance Config\r\n- visit the SI, create a new source, and submit an initial message containing unicode, ie `Hallo! \u00f6, \u00fc, \u00e4, or \u00df`\r\n\r\n## Expected Behavior\r\n- Message is submitted\r\n\r\n## Actual Behavior\r\n- 500 error, and (in dev) stack trace due to TypeError\r\n\r\n## Comments\r\n\r\nSuggestions to fix, any other relevant information.\r\n\n", "before_files": [{"content": "import json\nimport re\nimport subprocess\nimport typing\nfrom hmac import compare_digest\n\nimport werkzeug\nfrom flask import current_app, flash, redirect, render_template, url_for\nfrom flask.sessions import SessionMixin\nfrom flask_babel import gettext\nfrom markupsafe import Markup, escape\nfrom source_user import SourceUser\nfrom store import Storage\n\nif typing.TYPE_CHECKING:\n from typing import Optional\n\n\ndef codename_detected(message: str, codename: str) -> bool:\n \"\"\"\n Check for codenames in incoming messages. including case where user copy/pasted\n from /generate or the codename widget on the same page\n \"\"\"\n message = message.strip()\n\n return compare_digest(message.strip(), codename)\n\n\ndef flash_msg(\n category: str,\n declarative: \"Optional[str]\",\n *msg_contents: \"str\",\n) -> None:\n \"\"\"\n Render flash message with a (currently) optional declarative heading.\n \"\"\"\n contents = Markup(\"<br>\".join([escape(part) for part in msg_contents]))\n\n msg = render_template(\n \"flash_message.html\",\n declarative=declarative,\n msg_contents=contents,\n )\n flash(Markup(msg), category)\n\n\ndef clear_session_and_redirect_to_logged_out_page(flask_session: SessionMixin) -> werkzeug.Response:\n msg = render_template(\n \"flash_message.html\",\n declarative=gettext(\"Important\"),\n msg_contents=Markup(\n gettext(\n 'You were logged out due to inactivity. Click the <img src={icon} alt=\"\" '\n 'width=\"16\" height=\"16\"> <b>New Identity</b> button in your Tor Browser\\'s '\n \"toolbar before moving on. This will clear your Tor Browser activity data on \"\n \"this device.\"\n ).format(icon=url_for(\"static\", filename=\"i/torbroom.png\"))\n ),\n )\n\n # Clear the session after we render the message so it's localized\n flask_session.clear()\n\n flash(Markup(msg), \"error\")\n return redirect(url_for(\"main.index\"))\n\n\ndef normalize_timestamps(logged_in_source: SourceUser) -> None:\n \"\"\"\n Update the timestamps on all of the source's submissions. This\n minimizes metadata that could be useful to investigators. 
See\n #301.\n \"\"\"\n source_in_db = logged_in_source.get_db_record()\n sub_paths = [\n Storage.get_default().path(logged_in_source.filesystem_id, submission.filename)\n for submission in source_in_db.submissions\n ]\n if len(sub_paths) > 1:\n args = [\"touch\", \"--no-create\"]\n args.extend(sub_paths)\n rc = subprocess.call(args)\n if rc != 0:\n current_app.logger.warning(\n \"Couldn't normalize submission \" \"timestamps (touch exited with %d)\" % rc\n )\n\n\ndef check_url_file(path: str, regexp: str) -> \"Optional[str]\":\n \"\"\"\n Check that a file exists at the path given and contains a single line\n matching the regexp. Used for checking the source interface address\n files in /var/lib/securedrop (as the Apache user can't read Tor config)\n \"\"\"\n try:\n f = open(path, \"r\")\n contents = f.readline().strip()\n f.close()\n if re.match(regexp, contents):\n return contents\n else:\n return None\n except IOError:\n return None\n\n\ndef get_sourcev3_url() -> \"Optional[str]\":\n return check_url_file(\"/var/lib/securedrop/source_v3_url\", r\"^[a-z0-9]{56}\\.onion$\")\n\n\ndef fit_codenames_into_cookie(codenames: dict) -> dict:\n \"\"\"\n If `codenames` will approach `werkzeug.Response.max_cookie_size` once\n serialized, incrementally pop off the oldest codename until the remaining\n (newer) ones will fit.\n \"\"\"\n\n serialized = json.dumps(codenames).encode()\n if len(codenames) > 1 and len(serialized) > 4000: # werkzeug.Response.max_cookie_size = 4093\n if current_app:\n current_app.logger.warn(\n f\"Popping oldest of {len(codenames)} \"\n f\"codenames ({len(serialized)} bytes) to \"\n f\"fit within maximum cookie size\"\n )\n del codenames[list(codenames)[0]] # FIFO\n\n return fit_codenames_into_cookie(codenames)\n\n return codenames\n", "path": "securedrop/source_app/utils.py"}], "after_files": [{"content": "import json\nimport re\nimport subprocess\nimport typing\nfrom hmac import compare_digest\n\nimport werkzeug\nfrom flask import current_app, flash, redirect, render_template, url_for\nfrom flask.sessions import SessionMixin\nfrom flask_babel import gettext\nfrom markupsafe import Markup, escape\nfrom source_user import SourceUser\nfrom store import Storage\n\nif typing.TYPE_CHECKING:\n from typing import Optional\n\n\ndef codename_detected(message: str, codename: str) -> bool:\n \"\"\"\n Check for codenames in incoming messages. including case where user copy/pasted\n from /generate or the codename widget on the same page\n \"\"\"\n message = message.strip()\n\n return compare_digest(message.strip().encode(\"utf-8\"), codename.encode(\"utf-8\"))\n\n\ndef flash_msg(\n category: str,\n declarative: \"Optional[str]\",\n *msg_contents: \"str\",\n) -> None:\n \"\"\"\n Render flash message with a (currently) optional declarative heading.\n \"\"\"\n contents = Markup(\"<br>\".join([escape(part) for part in msg_contents]))\n\n msg = render_template(\n \"flash_message.html\",\n declarative=declarative,\n msg_contents=contents,\n )\n flash(Markup(msg), category)\n\n\ndef clear_session_and_redirect_to_logged_out_page(flask_session: SessionMixin) -> werkzeug.Response:\n msg = render_template(\n \"flash_message.html\",\n declarative=gettext(\"Important\"),\n msg_contents=Markup(\n gettext(\n 'You were logged out due to inactivity. Click the <img src={icon} alt=\"\" '\n 'width=\"16\" height=\"16\"> <b>New Identity</b> button in your Tor Browser\\'s '\n \"toolbar before moving on. 
This will clear your Tor Browser activity data on \"\n \"this device.\"\n ).format(icon=url_for(\"static\", filename=\"i/torbroom.png\"))\n ),\n )\n\n # Clear the session after we render the message so it's localized\n flask_session.clear()\n\n flash(Markup(msg), \"error\")\n return redirect(url_for(\"main.index\"))\n\n\ndef normalize_timestamps(logged_in_source: SourceUser) -> None:\n \"\"\"\n Update the timestamps on all of the source's submissions. This\n minimizes metadata that could be useful to investigators. See\n #301.\n \"\"\"\n source_in_db = logged_in_source.get_db_record()\n sub_paths = [\n Storage.get_default().path(logged_in_source.filesystem_id, submission.filename)\n for submission in source_in_db.submissions\n ]\n if len(sub_paths) > 1:\n args = [\"touch\", \"--no-create\"]\n args.extend(sub_paths)\n rc = subprocess.call(args)\n if rc != 0:\n current_app.logger.warning(\n \"Couldn't normalize submission \" \"timestamps (touch exited with %d)\" % rc\n )\n\n\ndef check_url_file(path: str, regexp: str) -> \"Optional[str]\":\n \"\"\"\n Check that a file exists at the path given and contains a single line\n matching the regexp. Used for checking the source interface address\n files in /var/lib/securedrop (as the Apache user can't read Tor config)\n \"\"\"\n try:\n f = open(path, \"r\")\n contents = f.readline().strip()\n f.close()\n if re.match(regexp, contents):\n return contents\n else:\n return None\n except IOError:\n return None\n\n\ndef get_sourcev3_url() -> \"Optional[str]\":\n return check_url_file(\"/var/lib/securedrop/source_v3_url\", r\"^[a-z0-9]{56}\\.onion$\")\n\n\ndef fit_codenames_into_cookie(codenames: dict) -> dict:\n \"\"\"\n If `codenames` will approach `werkzeug.Response.max_cookie_size` once\n serialized, incrementally pop off the oldest codename until the remaining\n (newer) ones will fit.\n \"\"\"\n\n serialized = json.dumps(codenames).encode()\n if len(codenames) > 1 and len(serialized) > 4000: # werkzeug.Response.max_cookie_size = 4093\n if current_app:\n current_app.logger.warn(\n f\"Popping oldest of {len(codenames)} \"\n f\"codenames ({len(serialized)} bytes) to \"\n f\"fit within maximum cookie size\"\n )\n del codenames[list(codenames)[0]] # FIFO\n\n return fit_codenames_into_cookie(codenames)\n\n return codenames\n", "path": "securedrop/source_app/utils.py"}]} | 1,729 | 99 |
gh_patches_debug_31532 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-3889 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improving documentation for Set2Set layer
### 📚 Describe the documentation issue
I am new to the `pytorch_geometric` ecosystem and I was exploring it. At first glance at the `Set2Set` layer in the [docs](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.glob.Set2Set), it is not clear what the inputs `x` and `batch` to the forward pass are.
If I am not wrong, `x` represents the node features of the graph and `batch` represents a mapping from the node features to their graph identifiers.
### Suggest a potential alternative/fix
I was wondering whether it would be good to include this in the docs, or maybe also add typing.
Potential fix in `nn.glob.set2set.py`:
```
def forward(self, x: torch.Tensor, batch: torch.Tensor):
r"""
Args:
x: The input node features.
batch: A one dimension tensor representing a mapping between nodes and their graphs
"""
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/nn/glob/set2set.py`
Content:
```
1 import torch
2 from torch_scatter import scatter_add
3 from torch_geometric.utils import softmax
4
5
6 class Set2Set(torch.nn.Module):
7 r"""The global pooling operator based on iterative content-based attention
8 from the `"Order Matters: Sequence to sequence for sets"
9 <https://arxiv.org/abs/1511.06391>`_ paper
10
11 .. math::
12 \mathbf{q}_t &= \mathrm{LSTM}(\mathbf{q}^{*}_{t-1})
13
14 \alpha_{i,t} &= \mathrm{softmax}(\mathbf{x}_i \cdot \mathbf{q}_t)
15
16 \mathbf{r}_t &= \sum_{i=1}^N \alpha_{i,t} \mathbf{x}_i
17
18 \mathbf{q}^{*}_t &= \mathbf{q}_t \, \Vert \, \mathbf{r}_t,
19
20 where :math:`\mathbf{q}^{*}_T` defines the output of the layer with twice
21 the dimensionality as the input.
22
23 Args:
24 in_channels (int): Size of each input sample.
25 processing_steps (int): Number of iterations :math:`T`.
26 num_layers (int, optional): Number of recurrent layers, *.e.g*, setting
27 :obj:`num_layers=2` would mean stacking two LSTMs together to form
28 a stacked LSTM, with the second LSTM taking in outputs of the first
29 LSTM and computing the final results. (default: :obj:`1`)
30 """
31 def __init__(self, in_channels, processing_steps, num_layers=1):
32 super().__init__()
33
34 self.in_channels = in_channels
35 self.out_channels = 2 * in_channels
36 self.processing_steps = processing_steps
37 self.num_layers = num_layers
38
39 self.lstm = torch.nn.LSTM(self.out_channels, self.in_channels,
40 num_layers)
41
42 self.reset_parameters()
43
44 def reset_parameters(self):
45 self.lstm.reset_parameters()
46
47 def forward(self, x, batch):
48 """"""
49 batch_size = batch.max().item() + 1
50
51 h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),
52 x.new_zeros((self.num_layers, batch_size, self.in_channels)))
53 q_star = x.new_zeros(batch_size, self.out_channels)
54
55 for _ in range(self.processing_steps):
56 q, h = self.lstm(q_star.unsqueeze(0), h)
57 q = q.view(batch_size, self.in_channels)
58 e = (x * q.index_select(0, batch)).sum(dim=-1, keepdim=True)
59 a = softmax(e, batch, num_nodes=batch_size)
60 r = scatter_add(a * x, batch, dim=0, dim_size=batch_size)
61 q_star = torch.cat([q, r], dim=-1)
62
63 return q_star
64
65 def __repr__(self) -> str:
66 return (f'{self.__class__.__name__}({self.in_channels}, '
67 f'{self.out_channels})')
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch_geometric/nn/glob/set2set.py b/torch_geometric/nn/glob/set2set.py
--- a/torch_geometric/nn/glob/set2set.py
+++ b/torch_geometric/nn/glob/set2set.py
@@ -1,5 +1,9 @@
+from typing import Optional
+
import torch
+from torch import Tensor
from torch_scatter import scatter_add
+
from torch_geometric.utils import softmax
@@ -27,8 +31,17 @@
:obj:`num_layers=2` would mean stacking two LSTMs together to form
a stacked LSTM, with the second LSTM taking in outputs of the first
LSTM and computing the final results. (default: :obj:`1`)
+
+ Shapes:
+ - **input:**
+ node features :math:`(|\mathcal{V}|, F)`,
+ batch vector :math:`(|\mathcal{V}|)` *(optional)*
+ - **output:**
+ set features :math:`(|\mathcal{G}|, 2 * F)` where
+ :math:`|\mathcal{G}|` denotes the number of graphs in the batch
"""
- def __init__(self, in_channels, processing_steps, num_layers=1):
+ def __init__(self, in_channels: int, processing_steps: int,
+ num_layers: int = 1):
super().__init__()
self.in_channels = in_channels
@@ -44,8 +57,16 @@
def reset_parameters(self):
self.lstm.reset_parameters()
- def forward(self, x, batch):
- """"""
+ def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor:
+ r"""
+ Args:
+ x (Tensor): The input node features.
+ batch (LongTensor, optional): A vector that maps each node to its
+ respective graph identifier. (default: :obj:`None`)
+ """
+ if batch is None:
+ batch = x.new_zeros(x.size(0), dtype=torch.int64)
+
batch_size = batch.max().item() + 1
h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),
| {"golden_diff": "diff --git a/torch_geometric/nn/glob/set2set.py b/torch_geometric/nn/glob/set2set.py\n--- a/torch_geometric/nn/glob/set2set.py\n+++ b/torch_geometric/nn/glob/set2set.py\n@@ -1,5 +1,9 @@\n+from typing import Optional\n+\n import torch\n+from torch import Tensor\n from torch_scatter import scatter_add\n+\n from torch_geometric.utils import softmax\n \n \n@@ -27,8 +31,17 @@\n :obj:`num_layers=2` would mean stacking two LSTMs together to form\n a stacked LSTM, with the second LSTM taking in outputs of the first\n LSTM and computing the final results. (default: :obj:`1`)\n+\n+ Shapes:\n+ - **input:**\n+ node features :math:`(|\\mathcal{V}|, F)`,\n+ batch vector :math:`(|\\mathcal{V}|)` *(optional)*\n+ - **output:**\n+ set features :math:`(|\\mathcal{G}|, 2 * F)` where\n+ :math:`|\\mathcal{G}|` denotes the number of graphs in the batch\n \"\"\"\n- def __init__(self, in_channels, processing_steps, num_layers=1):\n+ def __init__(self, in_channels: int, processing_steps: int,\n+ num_layers: int = 1):\n super().__init__()\n \n self.in_channels = in_channels\n@@ -44,8 +57,16 @@\n def reset_parameters(self):\n self.lstm.reset_parameters()\n \n- def forward(self, x, batch):\n- \"\"\"\"\"\"\n+ def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor:\n+ r\"\"\"\n+ Args:\n+ x (Tensor): The input node features.\n+ batch (LongTensor, optional): A vector that maps each node to its\n+ respective graph identifier. (default: :obj:`None`)\n+ \"\"\"\n+ if batch is None:\n+ batch = x.new_zeros(x.size(0), dtype=torch.int64)\n+\n batch_size = batch.max().item() + 1\n \n h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),\n", "issue": "Improving documentation for Set2Set layer\n### \ud83d\udcda Describe the documentation issue\n\nI am new to `pytorch_geometric` ecosystem and I was exploring it. At the first glance to the `Set2Set` layer in the [docs](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.glob.Set2Set), it is not clear what the inputs `x` and `batch` are to the forward pass.\r\n\r\nIf I am not wrong, `x` represents the node features of the graph and `batch` represents a mapping between the node features to their graph identifiers.\r\n\n\n### Suggest a potential alternative/fix\n\nI was wondering whether it will be good to include it to the docs or maybe also add typing.\r\nPotential fix in `nn.glob.set2set.py`:\r\n```\r\ndef forward(self, x: torch.Tensor, batch: torch.Tensor):\r\n r\"\"\"\r\n Args:\r\n x: The input node features.\r\n batch: A one dimension tensor representing a mapping between nodes and their graphs\r\n \"\"\"\r\n```\n", "before_files": [{"content": "import torch\nfrom torch_scatter import scatter_add\nfrom torch_geometric.utils import softmax\n\n\nclass Set2Set(torch.nn.Module):\n r\"\"\"The global pooling operator based on iterative content-based attention\n from the `\"Order Matters: Sequence to sequence for sets\"\n <https://arxiv.org/abs/1511.06391>`_ paper\n\n .. 
math::\n \\mathbf{q}_t &= \\mathrm{LSTM}(\\mathbf{q}^{*}_{t-1})\n\n \\alpha_{i,t} &= \\mathrm{softmax}(\\mathbf{x}_i \\cdot \\mathbf{q}_t)\n\n \\mathbf{r}_t &= \\sum_{i=1}^N \\alpha_{i,t} \\mathbf{x}_i\n\n \\mathbf{q}^{*}_t &= \\mathbf{q}_t \\, \\Vert \\, \\mathbf{r}_t,\n\n where :math:`\\mathbf{q}^{*}_T` defines the output of the layer with twice\n the dimensionality as the input.\n\n Args:\n in_channels (int): Size of each input sample.\n processing_steps (int): Number of iterations :math:`T`.\n num_layers (int, optional): Number of recurrent layers, *.e.g*, setting\n :obj:`num_layers=2` would mean stacking two LSTMs together to form\n a stacked LSTM, with the second LSTM taking in outputs of the first\n LSTM and computing the final results. (default: :obj:`1`)\n \"\"\"\n def __init__(self, in_channels, processing_steps, num_layers=1):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = 2 * in_channels\n self.processing_steps = processing_steps\n self.num_layers = num_layers\n\n self.lstm = torch.nn.LSTM(self.out_channels, self.in_channels,\n num_layers)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.lstm.reset_parameters()\n\n def forward(self, x, batch):\n \"\"\"\"\"\"\n batch_size = batch.max().item() + 1\n\n h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),\n x.new_zeros((self.num_layers, batch_size, self.in_channels)))\n q_star = x.new_zeros(batch_size, self.out_channels)\n\n for _ in range(self.processing_steps):\n q, h = self.lstm(q_star.unsqueeze(0), h)\n q = q.view(batch_size, self.in_channels)\n e = (x * q.index_select(0, batch)).sum(dim=-1, keepdim=True)\n a = softmax(e, batch, num_nodes=batch_size)\n r = scatter_add(a * x, batch, dim=0, dim_size=batch_size)\n q_star = torch.cat([q, r], dim=-1)\n\n return q_star\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}({self.in_channels}, '\n f'{self.out_channels})')\n", "path": "torch_geometric/nn/glob/set2set.py"}], "after_files": [{"content": "from typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom torch_scatter import scatter_add\n\nfrom torch_geometric.utils import softmax\n\n\nclass Set2Set(torch.nn.Module):\n r\"\"\"The global pooling operator based on iterative content-based attention\n from the `\"Order Matters: Sequence to sequence for sets\"\n <https://arxiv.org/abs/1511.06391>`_ paper\n\n .. math::\n \\mathbf{q}_t &= \\mathrm{LSTM}(\\mathbf{q}^{*}_{t-1})\n\n \\alpha_{i,t} &= \\mathrm{softmax}(\\mathbf{x}_i \\cdot \\mathbf{q}_t)\n\n \\mathbf{r}_t &= \\sum_{i=1}^N \\alpha_{i,t} \\mathbf{x}_i\n\n \\mathbf{q}^{*}_t &= \\mathbf{q}_t \\, \\Vert \\, \\mathbf{r}_t,\n\n where :math:`\\mathbf{q}^{*}_T` defines the output of the layer with twice\n the dimensionality as the input.\n\n Args:\n in_channels (int): Size of each input sample.\n processing_steps (int): Number of iterations :math:`T`.\n num_layers (int, optional): Number of recurrent layers, *.e.g*, setting\n :obj:`num_layers=2` would mean stacking two LSTMs together to form\n a stacked LSTM, with the second LSTM taking in outputs of the first\n LSTM and computing the final results. 
(default: :obj:`1`)\n\n Shapes:\n - **input:**\n node features :math:`(|\\mathcal{V}|, F)`,\n batch vector :math:`(|\\mathcal{V}|)` *(optional)*\n - **output:**\n set features :math:`(|\\mathcal{G}|, 2 * F)` where\n :math:`|\\mathcal{G}|` denotes the number of graphs in the batch\n \"\"\"\n def __init__(self, in_channels: int, processing_steps: int,\n num_layers: int = 1):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = 2 * in_channels\n self.processing_steps = processing_steps\n self.num_layers = num_layers\n\n self.lstm = torch.nn.LSTM(self.out_channels, self.in_channels,\n num_layers)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.lstm.reset_parameters()\n\n def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor:\n r\"\"\"\n Args:\n x (Tensor): The input node features.\n batch (LongTensor, optional): A vector that maps each node to its\n respective graph identifier. (default: :obj:`None`)\n \"\"\"\n if batch is None:\n batch = x.new_zeros(x.size(0), dtype=torch.int64)\n\n batch_size = batch.max().item() + 1\n\n h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),\n x.new_zeros((self.num_layers, batch_size, self.in_channels)))\n q_star = x.new_zeros(batch_size, self.out_channels)\n\n for _ in range(self.processing_steps):\n q, h = self.lstm(q_star.unsqueeze(0), h)\n q = q.view(batch_size, self.in_channels)\n e = (x * q.index_select(0, batch)).sum(dim=-1, keepdim=True)\n a = softmax(e, batch, num_nodes=batch_size)\n r = scatter_add(a * x, batch, dim=0, dim_size=batch_size)\n q_star = torch.cat([q, r], dim=-1)\n\n return q_star\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}({self.in_channels}, '\n f'{self.out_channels})')\n", "path": "torch_geometric/nn/glob/set2set.py"}]} | 1,294 | 507 |
gh_patches_debug_18939 | rasdani/github-patches | git_diff | TileDB-Inc__TileDB-Py-1639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nightly Azure Wheel Fail on Fri, February 3rd 2023
See run for more details:
https://dev.azure.com/TileDB-Inc/CI/_build/results?buildId=$&view=results
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/config.py`
Content:
```
1 # config.py
2 #
3 # LICENSE
4 #
5 # The MIT License
6 #
7 # Copyright (c) 2020 TileDB, Inc.
8 #
9 # Permission is hereby granted, free of charge, to any person obtaining a copy
10 # of this software and associated documentation files (the "Software"), to deal
11 # in the Software without restriction, including without limitation the rights
12 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 # copies of the Software, and to permit persons to whom the Software is
14 # furnished to do so, subject to the following conditions:
15 #
16 # The above copyright notice and this permission notice shall be included in
17 # all copies or substantial portions of the Software.
18 #
19 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 # THE SOFTWARE.
26 #
27 # DESCRIPTION
28 #
29 # Please see the TileDB documentation for more information:
30 # https://docs.tiledb.com/main/how-to/configuration
31 #
32 # This program shows how to set/get the TileDB configuration parameters.
33 #
34
35 import tiledb
36
37
38 def set_get_config_ctx_vfs():
39 # Create config object
40 config = tiledb.Config()
41
42 # Set/get config to/from ctx
43 ctx = tiledb.Ctx(config)
44 print(ctx.config())
45
46 # Set/get config to/from VFS
47 vfs = tiledb.VFS(config)
48 print(vfs.config())
49
50
51 def set_get_config():
52 config = tiledb.Config()
53
54 # Set value
55 config["vfs.s3.connect_timeout_ms"] = 5000
56
57 # Get value
58 tile_cache_size = config["sm.tile_cache_size"]
59 print("Tile cache size: %s" % str(tile_cache_size))
60
61
62 def print_default():
63 config = tiledb.Config()
64 print("\nDefault settings:")
65 for p in config.items():
66 print('"%s" : "%s"' % (p[0], p[1]))
67
68
69 def iter_config_with_prefix():
70 config = tiledb.Config()
71 # Print only the S3 settings.
72 print("\nVFS S3 settings:")
73 for p in config.items("vfs.s3."):
74 print('"%s" : "%s"' % (p[0], p[1]))
75
76
77 def save_load_config():
78 # Save to file
79 config = tiledb.Config()
80 config["sm.tile_cache_size"] = 0
81 config.save("tiledb_config.txt")
82
83 # Load from file
84 config_load = tiledb.Config.load("tiledb_config.txt")
85 print(
86 "\nTile cache size after loading from file: %s"
87 % str(config_load["sm.tile_cache_size"])
88 )
89
90
91 set_get_config_ctx_vfs()
92 set_get_config()
93 print_default()
94 iter_config_with_prefix()
95 save_load_config()
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/config.py b/examples/config.py
--- a/examples/config.py
+++ b/examples/config.py
@@ -55,8 +55,8 @@
config["vfs.s3.connect_timeout_ms"] = 5000
# Get value
- tile_cache_size = config["sm.tile_cache_size"]
- print("Tile cache size: %s" % str(tile_cache_size))
+ tile_cache_size = config["sm.memory_budget"]
+ print("Memory budget: %s" % str(tile_cache_size))
def print_default():
@@ -77,14 +77,14 @@
def save_load_config():
# Save to file
config = tiledb.Config()
- config["sm.tile_cache_size"] = 0
+ config["sm.memory_budget"] = 1234
config.save("tiledb_config.txt")
# Load from file
config_load = tiledb.Config.load("tiledb_config.txt")
print(
"\nTile cache size after loading from file: %s"
- % str(config_load["sm.tile_cache_size"])
+ % str(config_load["sm.memory_budget"])
)
| {"golden_diff": "diff --git a/examples/config.py b/examples/config.py\n--- a/examples/config.py\n+++ b/examples/config.py\n@@ -55,8 +55,8 @@\n config[\"vfs.s3.connect_timeout_ms\"] = 5000\n \n # Get value\n- tile_cache_size = config[\"sm.tile_cache_size\"]\n- print(\"Tile cache size: %s\" % str(tile_cache_size))\n+ tile_cache_size = config[\"sm.memory_budget\"]\n+ print(\"Memory budget: %s\" % str(tile_cache_size))\n \n \n def print_default():\n@@ -77,14 +77,14 @@\n def save_load_config():\n # Save to file\n config = tiledb.Config()\n- config[\"sm.tile_cache_size\"] = 0\n+ config[\"sm.memory_budget\"] = 1234\n config.save(\"tiledb_config.txt\")\n \n # Load from file\n config_load = tiledb.Config.load(\"tiledb_config.txt\")\n print(\n \"\\nTile cache size after loading from file: %s\"\n- % str(config_load[\"sm.tile_cache_size\"])\n+ % str(config_load[\"sm.memory_budget\"])\n )\n", "issue": "Nightly Azure Wheel Fail on Fri, February 3rd 2023\nSee run for more details:\nhttps://dev.azure.com/TileDB-Inc/CI/_build/results?buildId=$&view=results\n", "before_files": [{"content": "# config.py\n#\n# LICENSE\n#\n# The MIT License\n#\n# Copyright (c) 2020 TileDB, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# DESCRIPTION\n#\n# Please see the TileDB documentation for more information:\n# https://docs.tiledb.com/main/how-to/configuration\n#\n# This program shows how to set/get the TileDB configuration parameters.\n#\n\nimport tiledb\n\n\ndef set_get_config_ctx_vfs():\n # Create config object\n config = tiledb.Config()\n\n # Set/get config to/from ctx\n ctx = tiledb.Ctx(config)\n print(ctx.config())\n\n # Set/get config to/from VFS\n vfs = tiledb.VFS(config)\n print(vfs.config())\n\n\ndef set_get_config():\n config = tiledb.Config()\n\n # Set value\n config[\"vfs.s3.connect_timeout_ms\"] = 5000\n\n # Get value\n tile_cache_size = config[\"sm.tile_cache_size\"]\n print(\"Tile cache size: %s\" % str(tile_cache_size))\n\n\ndef print_default():\n config = tiledb.Config()\n print(\"\\nDefault settings:\")\n for p in config.items():\n print('\"%s\" : \"%s\"' % (p[0], p[1]))\n\n\ndef iter_config_with_prefix():\n config = tiledb.Config()\n # Print only the S3 settings.\n print(\"\\nVFS S3 settings:\")\n for p in config.items(\"vfs.s3.\"):\n print('\"%s\" : \"%s\"' % (p[0], p[1]))\n\n\ndef save_load_config():\n # Save to file\n config = tiledb.Config()\n config[\"sm.tile_cache_size\"] = 0\n config.save(\"tiledb_config.txt\")\n\n # Load from file\n config_load = tiledb.Config.load(\"tiledb_config.txt\")\n print(\n \"\\nTile cache size after loading from file: %s\"\n % str(config_load[\"sm.tile_cache_size\"])\n )\n\n\nset_get_config_ctx_vfs()\nset_get_config()\nprint_default()\niter_config_with_prefix()\nsave_load_config()\n", "path": "examples/config.py"}], "after_files": [{"content": "# config.py\n#\n# LICENSE\n#\n# The MIT License\n#\n# Copyright (c) 2020 TileDB, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# DESCRIPTION\n#\n# Please see the TileDB documentation for more information:\n# https://docs.tiledb.com/main/how-to/configuration\n#\n# This program shows how to set/get the TileDB configuration parameters.\n#\n\nimport tiledb\n\n\ndef set_get_config_ctx_vfs():\n # Create config object\n config = tiledb.Config()\n\n # Set/get config to/from ctx\n ctx = tiledb.Ctx(config)\n print(ctx.config())\n\n # Set/get config to/from VFS\n vfs = tiledb.VFS(config)\n print(vfs.config())\n\n\ndef set_get_config():\n config = tiledb.Config()\n\n # Set value\n config[\"vfs.s3.connect_timeout_ms\"] = 5000\n\n # Get value\n tile_cache_size = config[\"sm.memory_budget\"]\n print(\"Memory budget: %s\" % str(tile_cache_size))\n\n\ndef print_default():\n config = tiledb.Config()\n print(\"\\nDefault settings:\")\n for p in config.items():\n print('\"%s\" : \"%s\"' % (p[0], p[1]))\n\n\ndef iter_config_with_prefix():\n config = tiledb.Config()\n # Print only the S3 settings.\n print(\"\\nVFS S3 settings:\")\n for p in config.items(\"vfs.s3.\"):\n print('\"%s\" : \"%s\"' % (p[0], p[1]))\n\n\ndef save_load_config():\n # Save to file\n config = tiledb.Config()\n config[\"sm.memory_budget\"] = 1234\n config.save(\"tiledb_config.txt\")\n\n # Load from file\n config_load = tiledb.Config.load(\"tiledb_config.txt\")\n print(\n \"\\nTile cache size after loading from file: %s\"\n % str(config_load[\"sm.memory_budget\"])\n )\n\n\nset_get_config_ctx_vfs()\nset_get_config()\nprint_default()\niter_config_with_prefix()\nsave_load_config()\n", "path": "examples/config.py"}]} | 1,162 | 253 |
gh_patches_debug_37263 | rasdani/github-patches | git_diff | fidals__shopelectro-725 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make SiteDriver a context manager
It will provide a convenient API to manage selenium resources and properly shut down the selenium driver.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/tasks.py`
Content:
```
1 from contextlib import contextmanager
2
3 from django.conf import settings
4 from django.core.management import call_command
5 from selenium.common.exceptions import WebDriverException
6
7 from shopelectro import selenium
8 from shopelectro.celery import app
9 from shopelectro.report import TelegramReport
10 from shopelectro.models import CategoryPage
11 from shopelectro.management.commands._update_catalog import utils
12
13
14 @contextmanager
15 def report():
16 try:
17 yield
18 except Exception as error:
19 utils.report(str(error))
20 raise error
21
22
23 @app.task
24 def generate_price_files():
25 with report():
26 call_command('price')
27 print('Generate prices complete.')
28
29
30 @app.task
31 def generate_excel_file():
32 with report():
33 call_command('excel')
34 print('Generate excel complete.')
35
36
37 @app.task
38 def collect_static():
39 with report():
40 call_command('collectstatic', '--noinput')
41
42
43 @app.task
44 def update_catalog_command():
45 with report():
46 call_command('update_catalog')
47
48
49 @app.task
50 def update_default_templates():
51 with report():
52 call_command('update_default_templates')
53
54
55 @app.task(autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)
56 def update_catalog():
57 # http://docs.celeryproject.org/en/latest/userguide/canvas.html#map-starmap
58 return [
59 update_catalog_command(),
60 update_default_templates(),
61 generate_price_files(),
62 generate_excel_file(),
63 collect_static()
64 ]
65
66 # @todo #690:30m Schedule check_purchase in the celery beat.
67
68
69 @app.task(
70 bind=True,
71 autoretry_for=(WebDriverException, AssertionError),
72 retry_kwargs={'max_retries': settings.CHECK_PURCHASE_RETRIES},
73 )
74 def check_purchase(self):
75 try:
76 driver = selenium.SiteDriver(site_url=settings.BASE_URL)
77 category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)
78 category_page.load()
79 category_page.add_to_cart()
80
81 order_page = selenium.OrderPage(driver)
82 order_page.load()
83 order_page.fill_contacts()
84 order_page.make_order()
85
86 success_page = selenium.SuccessPage(driver)
87 assert success_page.is_success()
88 except (WebDriverException, AssertionError) as err:
89 if self.request.retries + 1 > self.max_retries:
90 # report on the last attempt
91 TelegramReport().send(f'Can\'t buy a product. Got the error: {err}')
92 raise err
93
```
Path: `shopelectro/selenium/driver.py`
Content:
```
1 from urllib.parse import urljoin
2
3 from django.conf import settings
4 from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
5 from selenium.webdriver.support.ui import WebDriverWait
6 from seleniumrequests import Remote
7
8
9 class SiteDriver(Remote):
10 """Provide convenient access to the site."""
11
12 def __init__(self, *, site_url, **kwargs):
13 kwargs.setdefault('command_executor', settings.SELENIUM_URL)
14 kwargs.setdefault('desired_capabilities', DesiredCapabilities.CHROME)
15 super().__init__(**kwargs)
16
17 self.site_url = site_url
18 self.wait = WebDriverWait(self, settings.SELENIUM_WAIT_SECONDS)
19
20 def get(self, url):
21 super().get(urljoin(self.site_url, url))
22
```
Path: `shopelectro/celery.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2 from datetime import timedelta
3 import os
4
5 from celery import Celery
6 from kombu import Exchange, Queue
7
8 # set the default Django settings module for the 'celery' program.
9 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')
10
11 app = Celery('shopelectro')
12
13 # Exchanges
14 default_exchange = Exchange('default', type='direct')
15 utils_exchange = Exchange('utils', type='direct')
16
17 # http://docs.celeryproject.org/en/latest/userguide/tasks.html
18 task_queues = (
19 Queue(
20 name='default',
21 exchange=default_exchange,
22 routing_key='default',
23 ),
24 Queue(
25 name='mail',
26 exchange=utils_exchange,
27 routing_key='utils.mail',
28 ),
29 Queue(
30 name='command',
31 exchange=utils_exchange,
32 routing_key='utils.command',
33 )
34 )
35
36 # http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html
37 beat_schedule = {
38 'update-catalog': {
39 'task': 'shopelectro.tasks.update_catalog',
40 'schedule': timedelta(hours=2),
41 },
42 }
43
44 # http://docs.celeryproject.org/en/master/userguide/routing.html
45 task_routes = {
46 'shopelectro.tasks.update_catalog': {
47 'queue': 'command',
48 'routing_key': 'utils.command',
49 'priority': 30,
50 },
51 'ecommerce.tasks.send_mail': {
52 'queue': 'mail',
53 'routing_key': 'utils.mail',
54 'priority': 50,
55 },
56 }
57
58 # Using a string here means the worker don't have to serialize
59 # the configuration object to child processes.
60 # - namespace='CELERY' means all celery-related configuration keys
61 # should have a `CELERY_` prefix.
62 app.config_from_object('django.conf:settings', namespace='CELERY')
63
64 # http://docs.celeryproject.org/en/latest/userguide/configuration.html
65
66 BROCKER_URL = (
67 f'amqp://{os.environ["RABBITMQ_DEFAULT_USER"]}:{os.environ["RABBITMQ_DEFAULT_PASS"]}'
68 f'@{os.environ["RABBITMQ_URL"]}:{os.environ["RABBITMQ_PORT"]}/'
69 )
70 app.conf.update(
71 broker_url=BROCKER_URL,
72 broker_heartbeat=30,
73 task_acks_late=True,
74 task_default_queue='default',
75 task_default_exchange='default',
76 task_default_routing_key='default',
77 task_ignore_result=True,
78 worker_pool_restarts=1000,
79 task_routes=task_routes,
80 task_queues=task_queues,
81 beat_schedule=beat_schedule,
82 )
83
84 # Load task modules from all registered Django app configs.
85 app.autodiscover_tasks()
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/celery.py b/shopelectro/celery.py
--- a/shopelectro/celery.py
+++ b/shopelectro/celery.py
@@ -37,7 +37,11 @@
beat_schedule = {
'update-catalog': {
'task': 'shopelectro.tasks.update_catalog',
- 'schedule': timedelta(hours=2),
+ 'schedule': timedelta(hours=2).total_seconds(),
+ },
+ 'check-purchase': {
+ 'task': 'shopelectro.tasks.check_purchase',
+ 'schedule': timedelta(days=1).total_seconds(),
},
}
@@ -48,6 +52,11 @@
'routing_key': 'utils.command',
'priority': 30,
},
+ 'shopelectro.tasks.check_purchase': {
+ 'queue': 'command',
+ 'routing_key': 'utils.command',
+ 'priority': 20,
+ },
'ecommerce.tasks.send_mail': {
'queue': 'mail',
'routing_key': 'utils.mail',
diff --git a/shopelectro/selenium/driver.py b/shopelectro/selenium/driver.py
--- a/shopelectro/selenium/driver.py
+++ b/shopelectro/selenium/driver.py
@@ -17,5 +17,11 @@
self.site_url = site_url
self.wait = WebDriverWait(self, settings.SELENIUM_WAIT_SECONDS)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.quit()
+
def get(self, url):
super().get(urljoin(self.site_url, url))
diff --git a/shopelectro/tasks.py b/shopelectro/tasks.py
--- a/shopelectro/tasks.py
+++ b/shopelectro/tasks.py
@@ -63,8 +63,6 @@
collect_static()
]
-# @todo #690:30m Schedule check_purchase in the celery beat.
-
@app.task(
bind=True,
@@ -73,18 +71,18 @@
)
def check_purchase(self):
try:
- driver = selenium.SiteDriver(site_url=settings.BASE_URL)
- category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)
- category_page.load()
- category_page.add_to_cart()
-
- order_page = selenium.OrderPage(driver)
- order_page.load()
- order_page.fill_contacts()
- order_page.make_order()
-
- success_page = selenium.SuccessPage(driver)
- assert success_page.is_success()
+ with selenium.SiteDriver(site_url=settings.BASE_URL) as driver:
+ category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)
+ category_page.load()
+ category_page.add_to_cart()
+
+ order_page = selenium.OrderPage(driver)
+ order_page.load()
+ order_page.fill_contacts()
+ order_page.make_order()
+
+ success_page = selenium.SuccessPage(driver)
+ assert success_page.is_success()
except (WebDriverException, AssertionError) as err:
if self.request.retries + 1 > self.max_retries:
# report on the last attempt
| {"golden_diff": "diff --git a/shopelectro/celery.py b/shopelectro/celery.py\n--- a/shopelectro/celery.py\n+++ b/shopelectro/celery.py\n@@ -37,7 +37,11 @@\n beat_schedule = {\n 'update-catalog': {\n 'task': 'shopelectro.tasks.update_catalog',\n- 'schedule': timedelta(hours=2),\n+ 'schedule': timedelta(hours=2).total_seconds(),\n+ },\n+ 'check-purchase': {\n+ 'task': 'shopelectro.tasks.check_purchase',\n+ 'schedule': timedelta(days=1).total_seconds(),\n },\n }\n \n@@ -48,6 +52,11 @@\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n+ 'shopelectro.tasks.check_purchase': {\n+ 'queue': 'command',\n+ 'routing_key': 'utils.command',\n+ 'priority': 20,\n+ },\n 'ecommerce.tasks.send_mail': {\n 'queue': 'mail',\n 'routing_key': 'utils.mail',\ndiff --git a/shopelectro/selenium/driver.py b/shopelectro/selenium/driver.py\n--- a/shopelectro/selenium/driver.py\n+++ b/shopelectro/selenium/driver.py\n@@ -17,5 +17,11 @@\n self.site_url = site_url\n self.wait = WebDriverWait(self, settings.SELENIUM_WAIT_SECONDS)\n \n+ def __enter__(self):\n+ return self\n+\n+ def __exit__(self, *args, **kwargs):\n+ self.quit()\n+\n def get(self, url):\n super().get(urljoin(self.site_url, url))\ndiff --git a/shopelectro/tasks.py b/shopelectro/tasks.py\n--- a/shopelectro/tasks.py\n+++ b/shopelectro/tasks.py\n@@ -63,8 +63,6 @@\n collect_static()\n ]\n \n-# @todo #690:30m Schedule check_purchase in the celery beat.\n-\n \n @app.task(\n bind=True,\n@@ -73,18 +71,18 @@\n )\n def check_purchase(self):\n try:\n- driver = selenium.SiteDriver(site_url=settings.BASE_URL)\n- category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)\n- category_page.load()\n- category_page.add_to_cart()\n-\n- order_page = selenium.OrderPage(driver)\n- order_page.load()\n- order_page.fill_contacts()\n- order_page.make_order()\n-\n- success_page = selenium.SuccessPage(driver)\n- assert success_page.is_success()\n+ with selenium.SiteDriver(site_url=settings.BASE_URL) as driver:\n+ category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)\n+ category_page.load()\n+ category_page.add_to_cart()\n+\n+ order_page = selenium.OrderPage(driver)\n+ order_page.load()\n+ order_page.fill_contacts()\n+ order_page.make_order()\n+\n+ success_page = selenium.SuccessPage(driver)\n+ assert success_page.is_success()\n except (WebDriverException, AssertionError) as err:\n if self.request.retries + 1 > self.max_retries:\n # report on the last attempt\n", "issue": "Make SiteDriver a context manager\nIt will provide convenient api to manage selenium resources and properly shout down selenium driver. 
\n", "before_files": [{"content": "from contextlib import contextmanager\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom selenium.common.exceptions import WebDriverException\n\nfrom shopelectro import selenium\nfrom shopelectro.celery import app\nfrom shopelectro.report import TelegramReport\nfrom shopelectro.models import CategoryPage\nfrom shopelectro.management.commands._update_catalog import utils\n\n\n@contextmanager\ndef report():\n try:\n yield\n except Exception as error:\n utils.report(str(error))\n raise error\n\n\[email protected]\ndef generate_price_files():\n with report():\n call_command('price')\n print('Generate prices complete.')\n\n\[email protected]\ndef generate_excel_file():\n with report():\n call_command('excel')\n print('Generate excel complete.')\n\n\[email protected]\ndef collect_static():\n with report():\n call_command('collectstatic', '--noinput')\n\n\[email protected]\ndef update_catalog_command():\n with report():\n call_command('update_catalog')\n\n\[email protected]\ndef update_default_templates():\n with report():\n call_command('update_default_templates')\n\n\[email protected](autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)\ndef update_catalog():\n # http://docs.celeryproject.org/en/latest/userguide/canvas.html#map-starmap\n return [\n update_catalog_command(),\n update_default_templates(),\n generate_price_files(),\n generate_excel_file(),\n collect_static()\n ]\n\n# @todo #690:30m Schedule check_purchase in the celery beat.\n\n\[email protected](\n bind=True,\n autoretry_for=(WebDriverException, AssertionError),\n retry_kwargs={'max_retries': settings.CHECK_PURCHASE_RETRIES},\n)\ndef check_purchase(self):\n try:\n driver = selenium.SiteDriver(site_url=settings.BASE_URL)\n category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)\n category_page.load()\n category_page.add_to_cart()\n\n order_page = selenium.OrderPage(driver)\n order_page.load()\n order_page.fill_contacts()\n order_page.make_order()\n\n success_page = selenium.SuccessPage(driver)\n assert success_page.is_success()\n except (WebDriverException, AssertionError) as err:\n if self.request.retries + 1 > self.max_retries:\n # report on the last attempt\n TelegramReport().send(f'Can\\'t buy a product. 
Got the error: {err}')\n raise err\n", "path": "shopelectro/tasks.py"}, {"content": "from urllib.parse import urljoin\n\nfrom django.conf import settings\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom seleniumrequests import Remote\n\n\nclass SiteDriver(Remote):\n \"\"\"Provide convenient access to the site.\"\"\"\n\n def __init__(self, *, site_url, **kwargs):\n kwargs.setdefault('command_executor', settings.SELENIUM_URL)\n kwargs.setdefault('desired_capabilities', DesiredCapabilities.CHROME)\n super().__init__(**kwargs)\n\n self.site_url = site_url\n self.wait = WebDriverWait(self, settings.SELENIUM_WAIT_SECONDS)\n\n def get(self, url):\n super().get(urljoin(self.site_url, url))\n", "path": "shopelectro/selenium/driver.py"}, {"content": "from __future__ import absolute_import, unicode_literals\nfrom datetime import timedelta\nimport os\n\nfrom celery import Celery\nfrom kombu import Exchange, Queue\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')\n\napp = Celery('shopelectro')\n\n# Exchanges\ndefault_exchange = Exchange('default', type='direct')\nutils_exchange = Exchange('utils', type='direct')\n\n# http://docs.celeryproject.org/en/latest/userguide/tasks.html\ntask_queues = (\n Queue(\n name='default',\n exchange=default_exchange,\n routing_key='default',\n ),\n Queue(\n name='mail',\n exchange=utils_exchange,\n routing_key='utils.mail',\n ),\n Queue(\n name='command',\n exchange=utils_exchange,\n routing_key='utils.command',\n )\n)\n\n# http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html\nbeat_schedule = {\n 'update-catalog': {\n 'task': 'shopelectro.tasks.update_catalog',\n 'schedule': timedelta(hours=2),\n },\n}\n\n# http://docs.celeryproject.org/en/master/userguide/routing.html\ntask_routes = {\n 'shopelectro.tasks.update_catalog': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n 'ecommerce.tasks.send_mail': {\n 'queue': 'mail',\n 'routing_key': 'utils.mail',\n 'priority': 50,\n },\n}\n\n# Using a string here means the worker don't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html\n\nBROCKER_URL = (\n f'amqp://{os.environ[\"RABBITMQ_DEFAULT_USER\"]}:{os.environ[\"RABBITMQ_DEFAULT_PASS\"]}'\n f'@{os.environ[\"RABBITMQ_URL\"]}:{os.environ[\"RABBITMQ_PORT\"]}/'\n)\napp.conf.update(\n broker_url=BROCKER_URL,\n broker_heartbeat=30,\n task_acks_late=True,\n task_default_queue='default',\n task_default_exchange='default',\n task_default_routing_key='default',\n task_ignore_result=True,\n worker_pool_restarts=1000,\n task_routes=task_routes,\n task_queues=task_queues,\n beat_schedule=beat_schedule,\n)\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n", "path": "shopelectro/celery.py"}], "after_files": [{"content": "from contextlib import contextmanager\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom selenium.common.exceptions import WebDriverException\n\nfrom shopelectro import selenium\nfrom shopelectro.celery import app\nfrom shopelectro.report import TelegramReport\nfrom shopelectro.models import 
CategoryPage\nfrom shopelectro.management.commands._update_catalog import utils\n\n\n@contextmanager\ndef report():\n try:\n yield\n except Exception as error:\n utils.report(str(error))\n raise error\n\n\[email protected]\ndef generate_price_files():\n with report():\n call_command('price')\n print('Generate prices complete.')\n\n\[email protected]\ndef generate_excel_file():\n with report():\n call_command('excel')\n print('Generate excel complete.')\n\n\[email protected]\ndef collect_static():\n with report():\n call_command('collectstatic', '--noinput')\n\n\[email protected]\ndef update_catalog_command():\n with report():\n call_command('update_catalog')\n\n\[email protected]\ndef update_default_templates():\n with report():\n call_command('update_default_templates')\n\n\[email protected](autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)\ndef update_catalog():\n # http://docs.celeryproject.org/en/latest/userguide/canvas.html#map-starmap\n return [\n update_catalog_command(),\n update_default_templates(),\n generate_price_files(),\n generate_excel_file(),\n collect_static()\n ]\n\n\[email protected](\n bind=True,\n autoretry_for=(WebDriverException, AssertionError),\n retry_kwargs={'max_retries': settings.CHECK_PURCHASE_RETRIES},\n)\ndef check_purchase(self):\n try:\n with selenium.SiteDriver(site_url=settings.BASE_URL) as driver:\n category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)\n category_page.load()\n category_page.add_to_cart()\n\n order_page = selenium.OrderPage(driver)\n order_page.load()\n order_page.fill_contacts()\n order_page.make_order()\n\n success_page = selenium.SuccessPage(driver)\n assert success_page.is_success()\n except (WebDriverException, AssertionError) as err:\n if self.request.retries + 1 > self.max_retries:\n # report on the last attempt\n TelegramReport().send(f'Can\\'t buy a product. 
Got the error: {err}')\n raise err\n", "path": "shopelectro/tasks.py"}, {"content": "from urllib.parse import urljoin\n\nfrom django.conf import settings\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom seleniumrequests import Remote\n\n\nclass SiteDriver(Remote):\n \"\"\"Provide convenient access to the site.\"\"\"\n\n def __init__(self, *, site_url, **kwargs):\n kwargs.setdefault('command_executor', settings.SELENIUM_URL)\n kwargs.setdefault('desired_capabilities', DesiredCapabilities.CHROME)\n super().__init__(**kwargs)\n\n self.site_url = site_url\n self.wait = WebDriverWait(self, settings.SELENIUM_WAIT_SECONDS)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs):\n self.quit()\n\n def get(self, url):\n super().get(urljoin(self.site_url, url))\n", "path": "shopelectro/selenium/driver.py"}, {"content": "from __future__ import absolute_import, unicode_literals\nfrom datetime import timedelta\nimport os\n\nfrom celery import Celery\nfrom kombu import Exchange, Queue\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')\n\napp = Celery('shopelectro')\n\n# Exchanges\ndefault_exchange = Exchange('default', type='direct')\nutils_exchange = Exchange('utils', type='direct')\n\n# http://docs.celeryproject.org/en/latest/userguide/tasks.html\ntask_queues = (\n Queue(\n name='default',\n exchange=default_exchange,\n routing_key='default',\n ),\n Queue(\n name='mail',\n exchange=utils_exchange,\n routing_key='utils.mail',\n ),\n Queue(\n name='command',\n exchange=utils_exchange,\n routing_key='utils.command',\n )\n)\n\n# http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html\nbeat_schedule = {\n 'update-catalog': {\n 'task': 'shopelectro.tasks.update_catalog',\n 'schedule': timedelta(hours=2).total_seconds(),\n },\n 'check-purchase': {\n 'task': 'shopelectro.tasks.check_purchase',\n 'schedule': timedelta(days=1).total_seconds(),\n },\n}\n\n# http://docs.celeryproject.org/en/master/userguide/routing.html\ntask_routes = {\n 'shopelectro.tasks.update_catalog': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n 'shopelectro.tasks.check_purchase': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 20,\n },\n 'ecommerce.tasks.send_mail': {\n 'queue': 'mail',\n 'routing_key': 'utils.mail',\n 'priority': 50,\n },\n}\n\n# Using a string here means the worker don't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html\n\nBROCKER_URL = (\n f'amqp://{os.environ[\"RABBITMQ_DEFAULT_USER\"]}:{os.environ[\"RABBITMQ_DEFAULT_PASS\"]}'\n f'@{os.environ[\"RABBITMQ_URL\"]}:{os.environ[\"RABBITMQ_PORT\"]}/'\n)\napp.conf.update(\n broker_url=BROCKER_URL,\n broker_heartbeat=30,\n task_acks_late=True,\n task_default_queue='default',\n task_default_exchange='default',\n task_default_routing_key='default',\n task_ignore_result=True,\n worker_pool_restarts=1000,\n task_routes=task_routes,\n task_queues=task_queues,\n beat_schedule=beat_schedule,\n)\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n", "path": "shopelectro/celery.py"}]} | 1,991 | 715 |
gh_patches_debug_53387 | rasdani/github-patches | git_diff | chainer__chainer-781 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support numpy 1.10
numpy 1.10.0 was released on 2015/10/07.
https://pypi.python.org/pypi/numpy/1.10.0
--- END ISSUE ---
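For context (not part of the original report): the breakage that typically surfaces when moving to numpy 1.10 is that ufuncs now raise an error, rather than a warning, when a result would have to be written into an output array through a cast outside the default "same_kind" rule. Below is a minimal numpy-only sketch of that behaviour, assuming numpy >= 1.10; whether this is the exact failure hit by cupy's ufunc calls here is an assumption.
```python
import numpy as np

out = np.empty(3, dtype=np.int64)

try:
    # float64 result into an int64 output: rejected under "same_kind" casting
    np.multiply(np.arange(3), 0.5, out=out)
except TypeError as err:
    print("rejected:", err)

# Opting in explicitly restores the old behaviour
np.multiply(np.arange(3), 0.5, out=out, casting='unsafe')
print(out)  # [0 0 1]
```
This `casting='unsafe'` opt-in is the same keyword that the fix below passes through to the element-wise ufunc calls.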
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/creation/ranges.py`
Content:
```
1 import numpy
2
3 import cupy
4 from cupy import core
5
6
7 def arange(start, stop=None, step=1, dtype=None):
8 """Rerurns an array with evenly spaced values within a given interval.
9
10 Values are generated within the half-open interval [start, stop). The first
11 three arguments are mapped like the ``range`` built-in function, i.e. start
12 and step are optional.
13
14 Args:
15 start: Start of the interval.
16 stop: End of the interval.
17 step: Step width between each pair of consecutive values.
18 dtype: Data type specifier. It is inferred from other arguments by
19 default.
20
21 Returns:
22 cupy.ndarray: The 1-D array of range values.
23
24 .. seealso:: :func:`numpy.arange`
25
26 """
27 if dtype is None:
28 if any(numpy.dtype(type(val)).kind == 'f'
29 for val in (start, stop, step)):
30 dtype = float
31 else:
32 dtype = int
33
34 if stop is None:
35 stop = start
36 start = 0
37 size = int(numpy.ceil((stop - start) / step))
38 if size <= 0:
39 return cupy.empty((0,), dtype=dtype)
40
41 ret = cupy.empty((size,), dtype=dtype)
42 typ = numpy.dtype(dtype).type
43 _arange_ufunc(typ(start), typ(step), ret, dtype=dtype)
44 return ret
45
46
47 def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
48 """Returns an array with evenly-spaced values within a given interval.
49
50 Instead of specifying the step width like :func:`cupy.arange`, this
51 function requires the total number of elements specified.
52
53 Args:
54 start: Start of the interval.
55 stop: End of the interval.
56 num: Number of elements.
57 endpoint (bool): If True, the stop value is included as the last
58 element. Otherwise, the stop value is omitted.
59 retstep (bool): If True, this function returns (array, step).
60 Otherwise, it returns only the array.
61 dtype: Data type specifier. It is inferred from the start and stop
62 arguments by default.
63
64 Returns:
65 cupy.ndarray: The 1-D array of ranged values.
66
67 """
68 if num < 0:
69 raise ValueError('linspace with num<0 is not supported')
70
71 if dtype is None:
72 # In actual implementation, only float is used
73 dtype = float
74
75 ret = cupy.empty((num,), dtype=dtype)
76 if num == 0:
77 step = float('nan')
78 elif num == 1:
79 ret.fill(start)
80 step = float('nan')
81 else:
82 div = (num - 1) if endpoint else num
83 step = float(stop - start) / div
84 stop = float(stop)
85
86 if step == 0.0:
87 # for underflow
88 _linspace_ufunc_underflow(start, stop - start, div, ret)
89 else:
90 _linspace_ufunc(start, step, ret)
91
92 if endpoint:
93 ret[-1] = stop
94
95 if retstep:
96 return ret, step
97 else:
98 return ret
99
100
101 # TODO(okuta): Implement logspace
102
103
104 # TODO(okuta): Implement meshgrid
105
106
107 # mgrid
108 # ogrid
109
110
111 _arange_ufunc = core.create_ufunc(
112 'cupy_arange',
113 ('bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
114 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
115 'out0 = in0 + i * in1')
116
117
118 _linspace_ufunc = core.create_ufunc(
119 'cupy_linspace',
120 ('dd->d',),
121 'out0 = in0 + i * in1')
122
123 _linspace_ufunc_underflow = core.create_ufunc(
124 'cupy_linspace',
125 ('ddd->d',),
126 'out0 = in0 + i * in1 / in2')
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/creation/ranges.py b/cupy/creation/ranges.py
--- a/cupy/creation/ranges.py
+++ b/cupy/creation/ranges.py
@@ -85,9 +85,10 @@
if step == 0.0:
# for underflow
- _linspace_ufunc_underflow(start, stop - start, div, ret)
+ _linspace_ufunc_underflow(start, stop - start, div, ret,
+ casting='unsafe')
else:
- _linspace_ufunc(start, step, ret)
+ _linspace_ufunc(start, step, ret, casting='unsafe')
if endpoint:
ret[-1] = stop
| {"golden_diff": "diff --git a/cupy/creation/ranges.py b/cupy/creation/ranges.py\n--- a/cupy/creation/ranges.py\n+++ b/cupy/creation/ranges.py\n@@ -85,9 +85,10 @@\n \n if step == 0.0:\n # for underflow\n- _linspace_ufunc_underflow(start, stop - start, div, ret)\n+ _linspace_ufunc_underflow(start, stop - start, div, ret,\n+ casting='unsafe')\n else:\n- _linspace_ufunc(start, step, ret)\n+ _linspace_ufunc(start, step, ret, casting='unsafe')\n \n if endpoint:\n ret[-1] = stop\n", "issue": "Support numpy 1.10\nnumpy 1.10.0 is released on 2015/10/07\n\nhttps://pypi.python.org/pypi/numpy/1.10.0\n\n", "before_files": [{"content": "import numpy\n\nimport cupy\nfrom cupy import core\n\n\ndef arange(start, stop=None, step=1, dtype=None):\n \"\"\"Rerurns an array with evenly spaced values within a given interval.\n\n Values are generated within the half-open interval [start, stop). The first\n three arguments are mapped like the ``range`` built-in function, i.e. start\n and step are optional.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n step: Step width between each pair of consecutive values.\n dtype: Data type specifier. It is inferred from other arguments by\n default.\n\n Returns:\n cupy.ndarray: The 1-D array of range values.\n\n .. seealso:: :func:`numpy.arange`\n\n \"\"\"\n if dtype is None:\n if any(numpy.dtype(type(val)).kind == 'f'\n for val in (start, stop, step)):\n dtype = float\n else:\n dtype = int\n\n if stop is None:\n stop = start\n start = 0\n size = int(numpy.ceil((stop - start) / step))\n if size <= 0:\n return cupy.empty((0,), dtype=dtype)\n\n ret = cupy.empty((size,), dtype=dtype)\n typ = numpy.dtype(dtype).type\n _arange_ufunc(typ(start), typ(step), ret, dtype=dtype)\n return ret\n\n\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):\n \"\"\"Returns an array with evenly-spaced values within a given interval.\n\n Instead of specifying the step width like :func:`cupy.arange`, this\n function requires the total number of elements specified.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n num: Number of elements.\n endpoint (bool): If True, the stop value is included as the last\n element. Otherwise, the stop value is omitted.\n retstep (bool): If True, this function returns (array, step).\n Otherwise, it returns only the array.\n dtype: Data type specifier. 
It is inferred from the start and stop\n arguments by default.\n\n Returns:\n cupy.ndarray: The 1-D array of ranged values.\n\n \"\"\"\n if num < 0:\n raise ValueError('linspace with num<0 is not supported')\n\n if dtype is None:\n # In actual implementation, only float is used\n dtype = float\n\n ret = cupy.empty((num,), dtype=dtype)\n if num == 0:\n step = float('nan')\n elif num == 1:\n ret.fill(start)\n step = float('nan')\n else:\n div = (num - 1) if endpoint else num\n step = float(stop - start) / div\n stop = float(stop)\n\n if step == 0.0:\n # for underflow\n _linspace_ufunc_underflow(start, stop - start, div, ret)\n else:\n _linspace_ufunc(start, step, ret)\n\n if endpoint:\n ret[-1] = stop\n\n if retstep:\n return ret, step\n else:\n return ret\n\n\n# TODO(okuta): Implement logspace\n\n\n# TODO(okuta): Implement meshgrid\n\n\n# mgrid\n# ogrid\n\n\n_arange_ufunc = core.create_ufunc(\n 'cupy_arange',\n ('bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',\n 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),\n 'out0 = in0 + i * in1')\n\n\n_linspace_ufunc = core.create_ufunc(\n 'cupy_linspace',\n ('dd->d',),\n 'out0 = in0 + i * in1')\n\n_linspace_ufunc_underflow = core.create_ufunc(\n 'cupy_linspace',\n ('ddd->d',),\n 'out0 = in0 + i * in1 / in2')\n", "path": "cupy/creation/ranges.py"}], "after_files": [{"content": "import numpy\n\nimport cupy\nfrom cupy import core\n\n\ndef arange(start, stop=None, step=1, dtype=None):\n \"\"\"Rerurns an array with evenly spaced values within a given interval.\n\n Values are generated within the half-open interval [start, stop). The first\n three arguments are mapped like the ``range`` built-in function, i.e. start\n and step are optional.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n step: Step width between each pair of consecutive values.\n dtype: Data type specifier. It is inferred from other arguments by\n default.\n\n Returns:\n cupy.ndarray: The 1-D array of range values.\n\n .. seealso:: :func:`numpy.arange`\n\n \"\"\"\n if dtype is None:\n if any(numpy.dtype(type(val)).kind == 'f'\n for val in (start, stop, step)):\n dtype = float\n else:\n dtype = int\n\n if stop is None:\n stop = start\n start = 0\n size = int(numpy.ceil((stop - start) / step))\n if size <= 0:\n return cupy.empty((0,), dtype=dtype)\n\n ret = cupy.empty((size,), dtype=dtype)\n typ = numpy.dtype(dtype).type\n _arange_ufunc(typ(start), typ(step), ret, dtype=dtype)\n return ret\n\n\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):\n \"\"\"Returns an array with evenly-spaced values within a given interval.\n\n Instead of specifying the step width like :func:`cupy.arange`, this\n function requires the total number of elements specified.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n num: Number of elements.\n endpoint (bool): If True, the stop value is included as the last\n element. Otherwise, the stop value is omitted.\n retstep (bool): If True, this function returns (array, step).\n Otherwise, it returns only the array.\n dtype: Data type specifier. 
It is inferred from the start and stop\n arguments by default.\n\n Returns:\n cupy.ndarray: The 1-D array of ranged values.\n\n \"\"\"\n if num < 0:\n raise ValueError('linspace with num<0 is not supported')\n\n if dtype is None:\n # In actual implementation, only float is used\n dtype = float\n\n ret = cupy.empty((num,), dtype=dtype)\n if num == 0:\n step = float('nan')\n elif num == 1:\n ret.fill(start)\n step = float('nan')\n else:\n div = (num - 1) if endpoint else num\n step = float(stop - start) / div\n stop = float(stop)\n\n if step == 0.0:\n # for underflow\n _linspace_ufunc_underflow(start, stop - start, div, ret,\n casting='unsafe')\n else:\n _linspace_ufunc(start, step, ret, casting='unsafe')\n\n if endpoint:\n ret[-1] = stop\n\n if retstep:\n return ret, step\n else:\n return ret\n\n\n# TODO(okuta): Implement logspace\n\n\n# TODO(okuta): Implement meshgrid\n\n\n# mgrid\n# ogrid\n\n\n_arange_ufunc = core.create_ufunc(\n 'cupy_arange',\n ('bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',\n 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),\n 'out0 = in0 + i * in1')\n\n\n_linspace_ufunc = core.create_ufunc(\n 'cupy_linspace',\n ('dd->d',),\n 'out0 = in0 + i * in1')\n\n_linspace_ufunc_underflow = core.create_ufunc(\n 'cupy_linspace',\n ('ddd->d',),\n 'out0 = in0 + i * in1 / in2')\n", "path": "cupy/creation/ranges.py"}]} | 1,516 | 163 |
gh_patches_debug_1201 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-588 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No way to define options that have no defaults
Currently if you set a value in `cookiecutter.json` to `null` it becomes `None` and is then turned into the _string_ `'None'`.
--- END ISSUE ---
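For context, a minimal sketch of the reported round-trip (an illustration only, not the project's actual code path; the `my_option` key is made up):
```python
import json
from jinja2 import Environment

raw = json.loads('{"my_option": null}')["my_option"]  # JSON null -> Python None
raw = str(raw)                                        # None -> "None"
rendered = Environment().from_string(raw).render()
print(repr(rendered))                                 # 'None' -- a literal string
```
Because every default is funneled through `str()` and a Jinja template, a `null` ends up indistinguishable from the text "None", which is why a fix has to short-circuit before that conversion.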
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/prompt.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.prompt
6 ---------------------
7
8 Functions for prompting the user for project info.
9 """
10
11 from collections import OrderedDict
12
13 import click
14 from past.builtins import basestring
15
16 from future.utils import iteritems
17 from jinja2.environment import Environment
18
19
20 def read_user_variable(var_name, default_value):
21 """Prompt the user for the given variable and return the entered value
22 or the given default.
23
24 :param str var_name: Variable of the context to query the user
25 :param default_value: Value that will be returned if no input happens
26 """
27 # Please see http://click.pocoo.org/4/api/#click.prompt
28 return click.prompt(var_name, default=default_value)
29
30
31 def read_user_yes_no(question, default_value):
32 """Prompt the user to reply with 'yes' or 'no' (or equivalent values).
33
34 Note:
35 Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'
36
37 :param str question: Question to the user
38 :param default_value: Value that will be returned if no input happens
39 """
40 # Please see http://click.pocoo.org/4/api/#click.prompt
41 return click.prompt(
42 question,
43 default=default_value,
44 type=click.BOOL
45 )
46
47
48 def read_user_choice(var_name, options):
49 """Prompt the user to choose from several options for the given variable.
50
51 The first item will be returned if no input happens.
52
53 :param str var_name: Variable as specified in the context
54 :param list options: Sequence of options that are available to select from
55 :return: Exactly one item of ``options`` that has been chosen by the user
56 """
57 # Please see http://click.pocoo.org/4/api/#click.prompt
58 if not isinstance(options, list):
59 raise TypeError
60
61 if not options:
62 raise ValueError
63
64 choice_map = OrderedDict(
65 (u'{}'.format(i), value) for i, value in enumerate(options, 1)
66 )
67 choices = choice_map.keys()
68 default = u'1'
69
70 choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]
71 prompt = u'\n'.join((
72 u'Select {}:'.format(var_name),
73 u'\n'.join(choice_lines),
74 u'Choose from {}'.format(u', '.join(choices))
75 ))
76
77 user_choice = click.prompt(
78 prompt, type=click.Choice(choices), default=default
79 )
80 return choice_map[user_choice]
81
82
83 def render_variable(env, raw, cookiecutter_dict):
84 if not isinstance(raw, basestring):
85 raw = str(raw)
86 template = env.from_string(raw)
87 rendered_template = template.render(cookiecutter=cookiecutter_dict)
88 return rendered_template
89
90
91 def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):
92 """Prompt the user which option to choose from the given. Each of the
93 possible choices is rendered beforehand.
94 """
95 rendered_options = [
96 render_variable(env, raw, cookiecutter_dict) for raw in options
97 ]
98
99 if no_input:
100 return rendered_options[0]
101 return read_user_choice(key, rendered_options)
102
103
104 def prompt_for_config(context, no_input=False):
105 """
106 Prompts the user to enter new config, using context as a source for the
107 field names and sample values.
108
109 :param no_input: Prompt the user at command line for manual configuration?
110 """
111 cookiecutter_dict = {}
112 env = Environment()
113
114 for key, raw in iteritems(context[u'cookiecutter']):
115 if key.startswith(u'_'):
116 cookiecutter_dict[key] = raw
117 continue
118
119 if isinstance(raw, list):
120 # We are dealing with a choice variable
121 val = prompt_choice_for_config(
122 cookiecutter_dict, env, key, raw, no_input
123 )
124 else:
125 # We are dealing with a regular variable
126 val = render_variable(env, raw, cookiecutter_dict)
127
128 if not no_input:
129 val = read_user_variable(key, val)
130
131 cookiecutter_dict[key] = val
132 return cookiecutter_dict
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py
--- a/cookiecutter/prompt.py
+++ b/cookiecutter/prompt.py
@@ -81,6 +81,8 @@
def render_variable(env, raw, cookiecutter_dict):
+ if raw is None:
+ return None
if not isinstance(raw, basestring):
raw = str(raw)
template = env.from_string(raw)
| {"golden_diff": "diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py\n--- a/cookiecutter/prompt.py\n+++ b/cookiecutter/prompt.py\n@@ -81,6 +81,8 @@\n \n \n def render_variable(env, raw, cookiecutter_dict):\n+ if raw is None:\n+ return None\n if not isinstance(raw, basestring):\n raw = str(raw)\n template = env.from_string(raw)\n", "issue": "No way to define options that have no defaults\nCurrently if you set a value in `cookiecutter.json` to `null` it becomes `None` and is then turned into the _string_ `'None'`.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport click\nfrom past.builtins import basestring\n\nfrom future.utils import iteritems\nfrom jinja2.environment import Environment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt the user for the given variable and return the entered value\n or the given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default\n )\n return choice_map[user_choice]\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n if not isinstance(raw, basestring):\n raw = str(raw)\n template = env.from_string(raw)\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt the user which option to choose from the given. 
Each of the\n possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = Environment()\n\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n else:\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport click\nfrom past.builtins import basestring\n\nfrom future.utils import iteritems\nfrom jinja2.environment import Environment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt the user for the given variable and return the entered value\n or the given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default\n )\n return choice_map[user_choice]\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n if raw is None:\n return None\n if not isinstance(raw, basestring):\n raw = str(raw)\n template = 
env.from_string(raw)\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt the user which option to choose from the given. Each of the\n possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = Environment()\n\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n else:\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}]} | 1,531 | 100 |
gh_patches_debug_26873 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3025 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix API handling of multi-column UNIQUE and PK columns
See https://github.com/centerofci/mathesar/issues/2231#issuecomment-1380309139 for some insight.
--- END ISSUE ---
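For context, a rough sketch of the serializer-field idea involved here (the class and field names below are illustrative assumptions, not Mathesar's code, and it presumes a configured Django/DRF project): a related field whose accepted primary keys are restricted to the table carried in the serializer context, so a multi-column constraint can only reference columns of that one table.
```python
from rest_framework import serializers

class TableScopedColumnField(serializers.PrimaryKeyRelatedField):
    """Accept only columns that belong to the table named in the context."""

    def get_queryset(self):
        table_id = self.context.get('table_id')
        queryset = super().get_queryset()
        if table_id is None or queryset is None:
            return None
        return queryset.filter(table__id=table_id)

# Hypothetical usage inside a constraint serializer:
#     columns = TableScopedColumnField(queryset=Column.current_objects, many=True)
```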
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/serializers/constraints.py`
Content:
```
1 from psycopg.errors import DuplicateTable, UniqueViolation
2 from rest_framework import serializers, status
3
4 from db.constraints import utils as constraint_utils
5 from db.identifiers import is_identifier_too_long
6 from db.constraints.base import ForeignKeyConstraint, UniqueConstraint
7
8 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
9 from mathesar.api.exceptions.validation_exceptions.exceptions import (
10 ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,
11 InvalidTableName
12 )
13 from mathesar.api.serializers.shared_serializers import (
14 MathesarPolymorphicErrorMixin,
15 ReadWritePolymorphicSerializerMappingMixin,
16 )
17 from mathesar.models.base import Column, Constraint, Table
18
19
20 class Table_Filtered_Column_queryset(serializers.PrimaryKeyRelatedField):
21 def get_queryset(self):
22 table_id = self.context.get('table_id', None)
23 queryset = super(Table_Filtered_Column_queryset, self).get_queryset()
24 if table_id is None or not queryset:
25 return None
26 return queryset.filter(table__id=table_id)
27
28
29 class BaseConstraintSerializer(serializers.ModelSerializer):
30 name = serializers.CharField(required=False)
31 type = serializers.CharField()
32 columns = Table_Filtered_Column_queryset(queryset=Column.current_objects, many=True)
33
34 class Meta:
35 model = Constraint
36 fields = ['id', 'name', 'type', 'columns']
37
38 def construct_constraint_obj(self, table, data):
39 columns_attnum = [column.attnum for column in data.get('columns')]
40 if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:
41 return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)
42 return None
43
44 def create(self, validated_data):
45 table = self.context['table']
46 constraint_obj = self.construct_constraint_obj(table, validated_data)
47 # Additional check is needed because we support read operations for primary key constraint,
48 # but we don't support write operations
49 if constraint_obj is None:
50 constraint_type = validated_data.get('type', None)
51 raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')
52 try:
53 constraint = table.add_constraint(constraint_obj)
54 except DuplicateTable as e:
55 raise database_api_exceptions.DuplicateTableAPIException(
56 e,
57 message='Relation with the same name already exists',
58 status_code=status.HTTP_400_BAD_REQUEST
59 )
60 except UniqueViolation as e:
61 raise database_api_exceptions.UniqueViolationAPIException(
62 e,
63 status_code=status.HTTP_400_BAD_REQUEST
64 )
65 return constraint
66
67 def validate_name(self, name):
68 if is_identifier_too_long(name):
69 raise database_api_exceptions.IdentifierTooLong(field='name')
70 return name
71
72
73 class ForeignKeyConstraintSerializer(BaseConstraintSerializer):
74 class Meta:
75 model = Constraint
76 fields = BaseConstraintSerializer.Meta.fields + [
77 'referent_columns',
78 'referent_table',
79 'onupdate',
80 'ondelete',
81 'deferrable',
82 'match'
83 ]
84
85 referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)
86 referent_table = serializers.SerializerMethodField()
87 onupdate = serializers.ChoiceField(
88 choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],
89 required=False,
90 allow_null=True
91 )
92 ondelete = serializers.ChoiceField(
93 choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],
94 required=False,
95 allow_null=True
96 )
97 deferrable = serializers.BooleanField(allow_null=True, required=False)
98 match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)
99
100 def get_referent_table(self, obj):
101 return obj.referent_columns[0].table.id
102
103 def construct_constraint_obj(self, table, data):
104 columns_attnum = [column.attnum for column in data.get('columns')]
105 referent_columns = data.get('referent_columns')
106 referent_columns_attnum = [column.attnum for column in referent_columns]
107 constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']
108 constraint_options = {
109 constraint_options_field: data[constraint_options_field]
110 for constraint_options_field in constraint_options_fields if constraint_options_field in data
111 }
112 return ForeignKeyConstraint(
113 data.get('name', None),
114 table.oid,
115 columns_attnum,
116 referent_columns[0].table.oid,
117 referent_columns_attnum,
118 constraint_options
119 )
120
121
122 class ConstraintSerializer(
123 ReadWritePolymorphicSerializerMappingMixin,
124 MathesarPolymorphicErrorMixin,
125 serializers.ModelSerializer
126 ):
127 class Meta:
128 model = Constraint
129 fields = '__all__'
130
131 serializers_mapping = {
132 'foreignkey': ForeignKeyConstraintSerializer,
133 'primary': BaseConstraintSerializer,
134 'unique': BaseConstraintSerializer,
135 }
136
137 def get_mapping_field(self, data):
138 if isinstance(data, Constraint):
139 constraint_type = data.type
140 else:
141 constraint_type = data.get('type', None)
142 assert constraint_type is not None
143 return constraint_type
144
145 def create(self, validated_data):
146 serializer = self.get_serializer_class(self.get_mapping_field(validated_data))
147 return serializer.create(validated_data)
148
149 def run_validation(self, data):
150 if referent_table := data.get('referent_table', None):
151 referent_table_name = Table.current_objects.get(id=referent_table).name
152 if any(
153 invalid_char in referent_table_name
154 for invalid_char in ('(', ')')
155 ):
156 raise InvalidTableName(
157 referent_table_name,
158 field='referent_table'
159 )
160 constraint_type = data.get('type', None)
161 if constraint_type not in self.serializers_mapping.keys():
162 raise UnsupportedConstraintAPIException(constraint_type=constraint_type)
163 columns = data.get('columns', None)
164 if columns == []:
165 raise ConstraintColumnEmptyAPIException(field='columns')
166 return super(ConstraintSerializer, self).run_validation(data)
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/serializers/constraints.py b/mathesar/api/serializers/constraints.py
--- a/mathesar/api/serializers/constraints.py
+++ b/mathesar/api/serializers/constraints.py
@@ -17,10 +17,17 @@
from mathesar.models.base import Column, Constraint, Table
-class Table_Filtered_Column_queryset(serializers.PrimaryKeyRelatedField):
+class TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):
+ """
+ Limits the accepted related primary key values to a specific table.
+ For example, if the PrimaryKeyRelatedField is instantiated with a
+ Column queryset, only columns in the "associated table" are
+ accepted. The "associated table" is defined by the context dict's
+ `table_id` value.
+ """
def get_queryset(self):
table_id = self.context.get('table_id', None)
- queryset = super(Table_Filtered_Column_queryset, self).get_queryset()
+ queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()
if table_id is None or not queryset:
return None
return queryset.filter(table__id=table_id)
@@ -29,7 +36,7 @@
class BaseConstraintSerializer(serializers.ModelSerializer):
name = serializers.CharField(required=False)
type = serializers.CharField()
- columns = Table_Filtered_Column_queryset(queryset=Column.current_objects, many=True)
+ columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)
class Meta:
model = Constraint
| {"golden_diff": "diff --git a/mathesar/api/serializers/constraints.py b/mathesar/api/serializers/constraints.py\n--- a/mathesar/api/serializers/constraints.py\n+++ b/mathesar/api/serializers/constraints.py\n@@ -17,10 +17,17 @@\n from mathesar.models.base import Column, Constraint, Table\n \n \n-class Table_Filtered_Column_queryset(serializers.PrimaryKeyRelatedField):\n+class TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n+ \"\"\"\n+ Limits the accepted related primary key values to a specific table.\n+ For example, if the PrimaryKeyRelatedField is instantiated with a\n+ Column queryset, only columns in the \"associated table\" are\n+ accepted. The \"associated table\" is defined by the context dict's\n+ `table_id` value.\n+ \"\"\"\n def get_queryset(self):\n table_id = self.context.get('table_id', None)\n- queryset = super(Table_Filtered_Column_queryset, self).get_queryset()\n+ queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()\n if table_id is None or not queryset:\n return None\n return queryset.filter(table__id=table_id)\n@@ -29,7 +36,7 @@\n class BaseConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n- columns = Table_Filtered_Column_queryset(queryset=Column.current_objects, many=True)\n+ columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)\n \n class Meta:\n model = Constraint\n", "issue": "Fix API handling of multi-column UNIQUE and PK columns\nSee https://github.com/centerofci/mathesar/issues/2231#issuecomment-1380309139 for some insight.\nFix API handling of multi-column UNIQUE and PK columns\nSee https://github.com/centerofci/mathesar/issues/2231#issuecomment-1380309139 for some insight.\n", "before_files": [{"content": "from psycopg.errors import DuplicateTable, UniqueViolation\nfrom rest_framework import serializers, status\n\nfrom db.constraints import utils as constraint_utils\nfrom db.identifiers import is_identifier_too_long\nfrom db.constraints.base import ForeignKeyConstraint, UniqueConstraint\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,\n InvalidTableName\n)\nfrom mathesar.api.serializers.shared_serializers import (\n MathesarPolymorphicErrorMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n)\nfrom mathesar.models.base import Column, Constraint, Table\n\n\nclass Table_Filtered_Column_queryset(serializers.PrimaryKeyRelatedField):\n def get_queryset(self):\n table_id = self.context.get('table_id', None)\n queryset = super(Table_Filtered_Column_queryset, self).get_queryset()\n if table_id is None or not queryset:\n return None\n return queryset.filter(table__id=table_id)\n\n\nclass BaseConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n columns = Table_Filtered_Column_queryset(queryset=Column.current_objects, many=True)\n\n class Meta:\n model = Constraint\n fields = ['id', 'name', 'type', 'columns']\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:\n return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)\n return None\n\n def create(self, validated_data):\n table = 
self.context['table']\n constraint_obj = self.construct_constraint_obj(table, validated_data)\n # Additional check is needed because we support read operations for primary key constraint,\n # but we don't support write operations\n if constraint_obj is None:\n constraint_type = validated_data.get('type', None)\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')\n try:\n constraint = table.add_constraint(constraint_obj)\n except DuplicateTable as e:\n raise database_api_exceptions.DuplicateTableAPIException(\n e,\n message='Relation with the same name already exists',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UniqueViolation as e:\n raise database_api_exceptions.UniqueViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n return constraint\n\n def validate_name(self, name):\n if is_identifier_too_long(name):\n raise database_api_exceptions.IdentifierTooLong(field='name')\n return name\n\n\nclass ForeignKeyConstraintSerializer(BaseConstraintSerializer):\n class Meta:\n model = Constraint\n fields = BaseConstraintSerializer.Meta.fields + [\n 'referent_columns',\n 'referent_table',\n 'onupdate',\n 'ondelete',\n 'deferrable',\n 'match'\n ]\n\n referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n referent_table = serializers.SerializerMethodField()\n onupdate = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n ondelete = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n deferrable = serializers.BooleanField(allow_null=True, required=False)\n match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)\n\n def get_referent_table(self, obj):\n return obj.referent_columns[0].table.id\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n referent_columns = data.get('referent_columns')\n referent_columns_attnum = [column.attnum for column in referent_columns]\n constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']\n constraint_options = {\n constraint_options_field: data[constraint_options_field]\n for constraint_options_field in constraint_options_fields if constraint_options_field in data\n }\n return ForeignKeyConstraint(\n data.get('name', None),\n table.oid,\n columns_attnum,\n referent_columns[0].table.oid,\n referent_columns_attnum,\n constraint_options\n )\n\n\nclass ConstraintSerializer(\n ReadWritePolymorphicSerializerMappingMixin,\n MathesarPolymorphicErrorMixin,\n serializers.ModelSerializer\n):\n class Meta:\n model = Constraint\n fields = '__all__'\n\n serializers_mapping = {\n 'foreignkey': ForeignKeyConstraintSerializer,\n 'primary': BaseConstraintSerializer,\n 'unique': BaseConstraintSerializer,\n }\n\n def get_mapping_field(self, data):\n if isinstance(data, Constraint):\n constraint_type = data.type\n else:\n constraint_type = data.get('type', None)\n assert constraint_type is not None\n return constraint_type\n\n def create(self, validated_data):\n serializer = self.get_serializer_class(self.get_mapping_field(validated_data))\n return serializer.create(validated_data)\n\n def run_validation(self, data):\n if referent_table := data.get('referent_table', None):\n referent_table_name = Table.current_objects.get(id=referent_table).name\n if any(\n 
invalid_char in referent_table_name\n for invalid_char in ('(', ')')\n ):\n raise InvalidTableName(\n referent_table_name,\n field='referent_table'\n )\n constraint_type = data.get('type', None)\n if constraint_type not in self.serializers_mapping.keys():\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type)\n columns = data.get('columns', None)\n if columns == []:\n raise ConstraintColumnEmptyAPIException(field='columns')\n return super(ConstraintSerializer, self).run_validation(data)\n", "path": "mathesar/api/serializers/constraints.py"}], "after_files": [{"content": "from psycopg.errors import DuplicateTable, UniqueViolation\nfrom rest_framework import serializers, status\n\nfrom db.constraints import utils as constraint_utils\nfrom db.identifiers import is_identifier_too_long\nfrom db.constraints.base import ForeignKeyConstraint, UniqueConstraint\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,\n InvalidTableName\n)\nfrom mathesar.api.serializers.shared_serializers import (\n MathesarPolymorphicErrorMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n)\nfrom mathesar.models.base import Column, Constraint, Table\n\n\nclass TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n \"\"\"\n Limits the accepted related primary key values to a specific table.\n For example, if the PrimaryKeyRelatedField is instantiated with a\n Column queryset, only columns in the \"associated table\" are\n accepted. The \"associated table\" is defined by the context dict's\n `table_id` value.\n \"\"\"\n def get_queryset(self):\n table_id = self.context.get('table_id', None)\n queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()\n if table_id is None or not queryset:\n return None\n return queryset.filter(table__id=table_id)\n\n\nclass BaseConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)\n\n class Meta:\n model = Constraint\n fields = ['id', 'name', 'type', 'columns']\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:\n return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)\n return None\n\n def create(self, validated_data):\n table = self.context['table']\n constraint_obj = self.construct_constraint_obj(table, validated_data)\n # Additional check is needed because we support read operations for primary key constraint,\n # but we don't support write operations\n if constraint_obj is None:\n constraint_type = validated_data.get('type', None)\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')\n try:\n constraint = table.add_constraint(constraint_obj)\n except DuplicateTable as e:\n raise database_api_exceptions.DuplicateTableAPIException(\n e,\n message='Relation with the same name already exists',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UniqueViolation as e:\n raise database_api_exceptions.UniqueViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n return constraint\n\n def validate_name(self, name):\n if is_identifier_too_long(name):\n raise 
database_api_exceptions.IdentifierTooLong(field='name')\n return name\n\n\nclass ForeignKeyConstraintSerializer(BaseConstraintSerializer):\n class Meta:\n model = Constraint\n fields = BaseConstraintSerializer.Meta.fields + [\n 'referent_columns',\n 'referent_table',\n 'onupdate',\n 'ondelete',\n 'deferrable',\n 'match'\n ]\n\n referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n referent_table = serializers.SerializerMethodField()\n onupdate = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n ondelete = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n deferrable = serializers.BooleanField(allow_null=True, required=False)\n match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)\n\n def get_referent_table(self, obj):\n return obj.referent_columns[0].table.id\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n referent_columns = data.get('referent_columns')\n referent_columns_attnum = [column.attnum for column in referent_columns]\n constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']\n constraint_options = {\n constraint_options_field: data[constraint_options_field]\n for constraint_options_field in constraint_options_fields if constraint_options_field in data\n }\n return ForeignKeyConstraint(\n data.get('name', None),\n table.oid,\n columns_attnum,\n referent_columns[0].table.oid,\n referent_columns_attnum,\n constraint_options\n )\n\n\nclass ConstraintSerializer(\n ReadWritePolymorphicSerializerMappingMixin,\n MathesarPolymorphicErrorMixin,\n serializers.ModelSerializer\n):\n class Meta:\n model = Constraint\n fields = '__all__'\n\n serializers_mapping = {\n 'foreignkey': ForeignKeyConstraintSerializer,\n 'primary': BaseConstraintSerializer,\n 'unique': BaseConstraintSerializer,\n }\n\n def get_mapping_field(self, data):\n if isinstance(data, Constraint):\n constraint_type = data.type\n else:\n constraint_type = data.get('type', None)\n assert constraint_type is not None\n return constraint_type\n\n def create(self, validated_data):\n serializer = self.get_serializer_class(self.get_mapping_field(validated_data))\n return serializer.create(validated_data)\n\n def run_validation(self, data):\n if referent_table := data.get('referent_table', None):\n referent_table_name = Table.current_objects.get(id=referent_table).name\n if any(\n invalid_char in referent_table_name\n for invalid_char in ('(', ')')\n ):\n raise InvalidTableName(\n referent_table_name,\n field='referent_table'\n )\n constraint_type = data.get('type', None)\n if constraint_type not in self.serializers_mapping.keys():\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type)\n columns = data.get('columns', None)\n if columns == []:\n raise ConstraintColumnEmptyAPIException(field='columns')\n return super(ConstraintSerializer, self).run_validation(data)\n", "path": "mathesar/api/serializers/constraints.py"}]} | 2,042 | 331 |
gh_patches_debug_30077 | rasdani/github-patches | git_diff | chainer__chainer-1158 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mix CPU mode and GPU mode in one network
I want to use CPU mode for some functions and GPU mode for others in one network.
When I use a large vocabulary in EmbedID (>= 1,000,000 words), it consumes a large amount of GPU memory. In this situation, I need to run EmbedID on the CPU and other functions, such as LSTMs, on the GPU.
--- END ISSUE ---
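For context, a hypothetical sketch (Chainer 1.x-style API; the model itself is made up) of what mixing devices looks like once `copy` can move data between host and GPU: keep the large `EmbedID` on the CPU and ship its output to the device for the LSTM. In training code one would call `model.lstm.to_gpu()` while leaving `model.embed` in host memory.
```python
import chainer
import chainer.functions as F
import chainer.links as L

class HybridRNN(chainer.Chain):
    def __init__(self, n_vocab, n_units):
        super(HybridRNN, self).__init__(
            embed=L.EmbedID(n_vocab, n_units),  # large table, kept on the CPU
            lstm=L.LSTM(n_units, n_units),      # moved to the GPU via to_gpu()
        )

    def __call__(self, x_cpu):
        h_cpu = self.embed(x_cpu)       # embedding lookup on the CPU
        h_gpu = F.copy(h_cpu, dst=0)    # host -> GPU 0 (dst=-1 means "to host")
        return self.lstm(h_gpu)         # recurrent step on the GPU
```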
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/array/copy.py`
Content:
```
1 from chainer import cuda
2 from chainer import function
3 from chainer.utils import type_check
4
5
6 class Copy(function.Function):
7
8 """Copy an input :class:`cupy.ndarray` onto another device."""
9
10 def __init__(self, out_device):
11 self.out_device = out_device
12
13 def check_type_forward(self, in_types):
14 type_check.expect(
15 in_types.size() == 1
16 )
17
18 def forward_cpu(self, x):
19 return x[0].copy(),
20
21 def forward_gpu(self, x):
22 return cuda.copy(x[0], out_device=self.out_device),
23
24 def backward_cpu(self, x, gy):
25 return gy[0].copy(),
26
27 def backward_gpu(self, x, gy):
28 return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),
29
30
31 def copy(x, dst):
32 """Copies the input variable onto the specified device.
33
34 This function copies the array of input variable onto the device specified
35 by ``dst`` if the original array is on GPU, and otherwise just copies the
36 array within host memory.
37
38 Args:
39 x (~chainer.Variable): Variable to be copied.
40 dst: Target device specifier.
41
42 Returns:
43 ~chainer.Variable: Output variable.
44
45 """
46 return Copy(dst)(x)
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/array/copy.py b/chainer/functions/array/copy.py
--- a/chainer/functions/array/copy.py
+++ b/chainer/functions/array/copy.py
@@ -16,24 +16,45 @@
)
def forward_cpu(self, x):
- return x[0].copy(),
+ if self.out_device == -1:
+ return x[0].copy(),
+ else:
+ return cuda.to_gpu(x[0], device=self.out_device),
def forward_gpu(self, x):
- return cuda.copy(x[0], out_device=self.out_device),
+ if self.out_device == -1:
+ return cuda.to_cpu(x[0]),
+ else:
+ return cuda.copy(x[0], out_device=self.out_device),
+
+ def backward(self, inputs, grad_outputs):
+ # In this function, `grad_outputs` contains cuda arrays even when
+ # `inputs` only contains numpy arrays.
+ if isinstance(inputs[0], cuda.ndarray):
+ return self.backward_gpu(inputs, grad_outputs)
+ else:
+ return self.backward_cpu(inputs, grad_outputs)
def backward_cpu(self, x, gy):
- return gy[0].copy(),
+ if self.out_device == -1:
+ return gy[0].copy(),
+ else:
+ return cuda.to_cpu(gy[0]),
def backward_gpu(self, x, gy):
- return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),
+ if self.out_device == -1:
+ return cuda.to_gpu(gy[0], device=cuda.get_device(x[0])),
+ else:
+ return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),
def copy(x, dst):
"""Copies the input variable onto the specified device.
This function copies the array of input variable onto the device specified
- by ``dst`` if the original array is on GPU, and otherwise just copies the
- array within host memory.
+ by ``dst``. When ``dst == -1``, it copies the array onto the host memory.
+ This function supports copies from host to device, from device to device
+ and from device to host.
Args:
x (~chainer.Variable): Variable to be copied.
| {"golden_diff": "diff --git a/chainer/functions/array/copy.py b/chainer/functions/array/copy.py\n--- a/chainer/functions/array/copy.py\n+++ b/chainer/functions/array/copy.py\n@@ -16,24 +16,45 @@\n )\n \n def forward_cpu(self, x):\n- return x[0].copy(),\n+ if self.out_device == -1:\n+ return x[0].copy(),\n+ else:\n+ return cuda.to_gpu(x[0], device=self.out_device),\n \n def forward_gpu(self, x):\n- return cuda.copy(x[0], out_device=self.out_device),\n+ if self.out_device == -1:\n+ return cuda.to_cpu(x[0]),\n+ else:\n+ return cuda.copy(x[0], out_device=self.out_device),\n+\n+ def backward(self, inputs, grad_outputs):\n+ # In this function, `grad_outputs` contains cuda arrays even when\n+ # `inputs` only contains numpy arrays.\n+ if isinstance(inputs[0], cuda.ndarray):\n+ return self.backward_gpu(inputs, grad_outputs)\n+ else:\n+ return self.backward_cpu(inputs, grad_outputs)\n \n def backward_cpu(self, x, gy):\n- return gy[0].copy(),\n+ if self.out_device == -1:\n+ return gy[0].copy(),\n+ else:\n+ return cuda.to_cpu(gy[0]),\n \n def backward_gpu(self, x, gy):\n- return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),\n+ if self.out_device == -1:\n+ return cuda.to_gpu(gy[0], device=cuda.get_device(x[0])),\n+ else:\n+ return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),\n \n \n def copy(x, dst):\n \"\"\"Copies the input variable onto the specified device.\n \n This function copies the array of input variable onto the device specified\n- by ``dst`` if the original array is on GPU, and otherwise just copies the\n- array within host memory.\n+ by ``dst``. When ``dst == -1``, it copies the array onto the host memory.\n+ This function supports copies from host to device, from device to device\n+ and from device to host.\n \n Args:\n x (~chainer.Variable): Variable to be copied.\n", "issue": "Mix CPU mode and GPU mode in one network\nI want to use CPU mode for some functions and GPU mode for others in one network.\nWhen I use a large number of vocabulary in EmbedID about >=1,000,000 words, it consumes large size of memory in GPU. 
In this situation, I need to use EmbedID in CPU, and to use other functions like LSTMs in GPU.\n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Copy(function.Function):\n\n \"\"\"Copy an input :class:`cupy.ndarray` onto another device.\"\"\"\n\n def __init__(self, out_device):\n self.out_device = out_device\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1\n )\n\n def forward_cpu(self, x):\n return x[0].copy(),\n\n def forward_gpu(self, x):\n return cuda.copy(x[0], out_device=self.out_device),\n\n def backward_cpu(self, x, gy):\n return gy[0].copy(),\n\n def backward_gpu(self, x, gy):\n return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),\n\n\ndef copy(x, dst):\n \"\"\"Copies the input variable onto the specified device.\n\n This function copies the array of input variable onto the device specified\n by ``dst`` if the original array is on GPU, and otherwise just copies the\n array within host memory.\n\n Args:\n x (~chainer.Variable): Variable to be copied.\n dst: Target device specifier.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Copy(dst)(x)\n", "path": "chainer/functions/array/copy.py"}], "after_files": [{"content": "from chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Copy(function.Function):\n\n \"\"\"Copy an input :class:`cupy.ndarray` onto another device.\"\"\"\n\n def __init__(self, out_device):\n self.out_device = out_device\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1\n )\n\n def forward_cpu(self, x):\n if self.out_device == -1:\n return x[0].copy(),\n else:\n return cuda.to_gpu(x[0], device=self.out_device),\n\n def forward_gpu(self, x):\n if self.out_device == -1:\n return cuda.to_cpu(x[0]),\n else:\n return cuda.copy(x[0], out_device=self.out_device),\n\n def backward(self, inputs, grad_outputs):\n # In this function, `grad_outputs` contains cuda arrays even when\n # `inputs` only contains numpy arrays.\n if isinstance(inputs[0], cuda.ndarray):\n return self.backward_gpu(inputs, grad_outputs)\n else:\n return self.backward_cpu(inputs, grad_outputs)\n\n def backward_cpu(self, x, gy):\n if self.out_device == -1:\n return gy[0].copy(),\n else:\n return cuda.to_cpu(gy[0]),\n\n def backward_gpu(self, x, gy):\n if self.out_device == -1:\n return cuda.to_gpu(gy[0], device=cuda.get_device(x[0])),\n else:\n return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),\n\n\ndef copy(x, dst):\n \"\"\"Copies the input variable onto the specified device.\n\n This function copies the array of input variable onto the device specified\n by ``dst``. When ``dst == -1``, it copies the array onto the host memory.\n This function supports copies from host to device, from device to device\n and from device to host.\n\n Args:\n x (~chainer.Variable): Variable to be copied.\n dst: Target device specifier.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Copy(dst)(x)\n", "path": "chainer/functions/array/copy.py"}]} | 717 | 515 |
gh_patches_debug_24801 | rasdani/github-patches | git_diff | mirumee__ariadne-158 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
If parse_literal is not present, try falling back to `parse_value(ast.value)`
Following idea was brought up in discussion for #24:
> Maybe we could default to calling parse_value with ast.value when only one function is provided?
This requires further study. `IntValue`, `StringValue` and friends are obvious to deal with, but complex types like `ListValue` may require some extra unpacking magic.
Still, if it is possible to pull off, it could be an excellent convenience for developers creating custom scalars, saving the need to potentially maintain two very similar implementations, one doing `isinstance(value, basestr)` and the other `isinstance(value, StringValue)`.
--- END ISSUE ---
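To make the suggested fallback concrete, here is a minimal sketch of wrapping an existing value parser so it can double as a literal parser for the simple scalar AST nodes. The wrapper name and the restriction to simple nodes are illustrative assumptions; list and object literals would still need dedicated unpacking.

```python
# Sketch: reuse a scalar's value parser as its default literal parser.
# Assumes graphql-core's simple scalar AST nodes, all of which expose `.value`.
from graphql.language.ast import (
    BooleanValueNode,
    FloatValueNode,
    IntValueNode,
    StringValueNode,
)

SIMPLE_SCALAR_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)


def make_default_literal_parser(parse_value):
    """Build a literal parser that delegates to ``parse_value(ast.value)``."""

    def default_literal_parser(ast):
        if not isinstance(ast, SIMPLE_SCALAR_NODES):
            # ListValueNode / ObjectValueNode would need unpacking first.
            raise TypeError(f"Cannot parse literal of type {type(ast).__name__}")
        return parse_value(ast.value)

    return default_literal_parser
```

Only registering this fallback when no explicit literal parser was supplied keeps the existing two-function API intact.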
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/scalars.py`
Content:
```
1 from typing import Optional, cast
2
3 from graphql.type import (
4 GraphQLNamedType,
5 GraphQLScalarLiteralParser,
6 GraphQLScalarSerializer,
7 GraphQLScalarType,
8 GraphQLScalarValueParser,
9 GraphQLSchema,
10 )
11
12 from .types import SchemaBindable
13
14
15 class ScalarType(SchemaBindable):
16 _serialize: Optional[GraphQLScalarSerializer]
17 _parse_value: Optional[GraphQLScalarValueParser]
18 _parse_literal: Optional[GraphQLScalarLiteralParser]
19
20 def __init__(
21 self,
22 name: str,
23 *,
24 serializer: GraphQLScalarSerializer = None,
25 value_parser: GraphQLScalarValueParser = None,
26 literal_parser: GraphQLScalarLiteralParser = None,
27 ) -> None:
28 self.name = name
29 self._serialize = serializer
30 self._parse_value = value_parser
31 self._parse_literal = literal_parser
32
33 def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:
34 self._serialize = f
35 return f
36
37 def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:
38 self._parse_value = f
39 return f
40
41 def set_literal_parser(
42 self, f: GraphQLScalarLiteralParser
43 ) -> GraphQLScalarLiteralParser:
44 self._parse_literal = f
45 return f
46
47 # Alias above setters for consistent decorator API
48 serializer = set_serializer
49 value_parser = set_value_parser
50 literal_parser = set_literal_parser
51
52 def bind_to_schema(self, schema: GraphQLSchema) -> None:
53 graphql_type = schema.type_map.get(self.name)
54 self.validate_graphql_type(graphql_type)
55 graphql_type = cast(GraphQLScalarType, graphql_type)
56
57 if self._serialize:
58 # See mypy bug https://github.com/python/mypy/issues/2427
59 graphql_type.serialize = self._serialize # type: ignore
60 if self._parse_value:
61 graphql_type.parse_value = self._parse_value
62 if self._parse_literal:
63 graphql_type.parse_literal = self._parse_literal
64
65 def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:
66 if not graphql_type:
67 raise ValueError("Scalar %s is not defined in the schema" % self.name)
68 if not isinstance(graphql_type, GraphQLScalarType):
69 raise ValueError(
70 "%s is defined in the schema, but it is instance of %s (expected %s)"
71 % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ariadne/scalars.py b/ariadne/scalars.py
--- a/ariadne/scalars.py
+++ b/ariadne/scalars.py
@@ -1,5 +1,11 @@
from typing import Optional, cast
+from graphql.language.ast import (
+ BooleanValueNode,
+ FloatValueNode,
+ IntValueNode,
+ StringValueNode,
+)
from graphql.type import (
GraphQLNamedType,
GraphQLScalarLiteralParser,
@@ -36,6 +42,8 @@
def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:
self._parse_value = f
+ if not self._parse_literal:
+ self._parse_literal = create_default_literal_parser(f)
return f
def set_literal_parser(
@@ -70,3 +78,15 @@
"%s is defined in the schema, but it is instance of %s (expected %s)"
% (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)
)
+
+
+SCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)
+
+
+def create_default_literal_parser(
+ value_parser: GraphQLScalarValueParser
+) -> GraphQLScalarLiteralParser:
+ def default_literal_parser(ast):
+ return value_parser(ast.value)
+
+ return default_literal_parser
| {"golden_diff": "diff --git a/ariadne/scalars.py b/ariadne/scalars.py\n--- a/ariadne/scalars.py\n+++ b/ariadne/scalars.py\n@@ -1,5 +1,11 @@\n from typing import Optional, cast\n \n+from graphql.language.ast import (\n+ BooleanValueNode,\n+ FloatValueNode,\n+ IntValueNode,\n+ StringValueNode,\n+)\n from graphql.type import (\n GraphQLNamedType,\n GraphQLScalarLiteralParser,\n@@ -36,6 +42,8 @@\n \n def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:\n self._parse_value = f\n+ if not self._parse_literal:\n+ self._parse_literal = create_default_literal_parser(f)\n return f\n \n def set_literal_parser(\n@@ -70,3 +78,15 @@\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)\n )\n+\n+\n+SCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)\n+\n+\n+def create_default_literal_parser(\n+ value_parser: GraphQLScalarValueParser\n+) -> GraphQLScalarLiteralParser:\n+ def default_literal_parser(ast):\n+ return value_parser(ast.value)\n+\n+ return default_literal_parser\n", "issue": "If parse_literal is not present try fallbacking to `parse_value(ast.value)`\nFollowing idea was brought up in discussion for #24:\r\n\r\n> Maybe we could default to calling parse_value with ast.value when only one function is provided?\r\n\r\nThis requires further study. `IntValue`, `StringValue` and friends are obvious to deal with, but but complex types like `ListValue` may require some extra unpacking magic.\r\n\r\nStill, if it is possible to pull off, it could be an excellent convenience for developers creating custom scalars, saving the need for potentially maintaining two very simiiar implementations, one doing `isinstance(value, basestr)` and other `isinstance(value, StringValue)`.\n", "before_files": [{"content": "from typing import Optional, cast\n\nfrom graphql.type import (\n GraphQLNamedType,\n GraphQLScalarLiteralParser,\n GraphQLScalarSerializer,\n GraphQLScalarType,\n GraphQLScalarValueParser,\n GraphQLSchema,\n)\n\nfrom .types import SchemaBindable\n\n\nclass ScalarType(SchemaBindable):\n _serialize: Optional[GraphQLScalarSerializer]\n _parse_value: Optional[GraphQLScalarValueParser]\n _parse_literal: Optional[GraphQLScalarLiteralParser]\n\n def __init__(\n self,\n name: str,\n *,\n serializer: GraphQLScalarSerializer = None,\n value_parser: GraphQLScalarValueParser = None,\n literal_parser: GraphQLScalarLiteralParser = None,\n ) -> None:\n self.name = name\n self._serialize = serializer\n self._parse_value = value_parser\n self._parse_literal = literal_parser\n\n def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:\n self._serialize = f\n return f\n\n def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:\n self._parse_value = f\n return f\n\n def set_literal_parser(\n self, f: GraphQLScalarLiteralParser\n ) -> GraphQLScalarLiteralParser:\n self._parse_literal = f\n return f\n\n # Alias above setters for consistent decorator API\n serializer = set_serializer\n value_parser = set_value_parser\n literal_parser = set_literal_parser\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLScalarType, graphql_type)\n\n if self._serialize:\n # See mypy bug https://github.com/python/mypy/issues/2427\n graphql_type.serialize = self._serialize # type: ignore\n if 
self._parse_value:\n graphql_type.parse_value = self._parse_value\n if self._parse_literal:\n graphql_type.parse_literal = self._parse_literal\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Scalar %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLScalarType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)\n )\n", "path": "ariadne/scalars.py"}], "after_files": [{"content": "from typing import Optional, cast\n\nfrom graphql.language.ast import (\n BooleanValueNode,\n FloatValueNode,\n IntValueNode,\n StringValueNode,\n)\nfrom graphql.type import (\n GraphQLNamedType,\n GraphQLScalarLiteralParser,\n GraphQLScalarSerializer,\n GraphQLScalarType,\n GraphQLScalarValueParser,\n GraphQLSchema,\n)\n\nfrom .types import SchemaBindable\n\n\nclass ScalarType(SchemaBindable):\n _serialize: Optional[GraphQLScalarSerializer]\n _parse_value: Optional[GraphQLScalarValueParser]\n _parse_literal: Optional[GraphQLScalarLiteralParser]\n\n def __init__(\n self,\n name: str,\n *,\n serializer: GraphQLScalarSerializer = None,\n value_parser: GraphQLScalarValueParser = None,\n literal_parser: GraphQLScalarLiteralParser = None,\n ) -> None:\n self.name = name\n self._serialize = serializer\n self._parse_value = value_parser\n self._parse_literal = literal_parser\n\n def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:\n self._serialize = f\n return f\n\n def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:\n self._parse_value = f\n if not self._parse_literal:\n self._parse_literal = create_default_literal_parser(f)\n return f\n\n def set_literal_parser(\n self, f: GraphQLScalarLiteralParser\n ) -> GraphQLScalarLiteralParser:\n self._parse_literal = f\n return f\n\n # Alias above setters for consistent decorator API\n serializer = set_serializer\n value_parser = set_value_parser\n literal_parser = set_literal_parser\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLScalarType, graphql_type)\n\n if self._serialize:\n # See mypy bug https://github.com/python/mypy/issues/2427\n graphql_type.serialize = self._serialize # type: ignore\n if self._parse_value:\n graphql_type.parse_value = self._parse_value\n if self._parse_literal:\n graphql_type.parse_literal = self._parse_literal\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Scalar %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLScalarType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)\n )\n\n\nSCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)\n\n\ndef create_default_literal_parser(\n value_parser: GraphQLScalarValueParser\n) -> GraphQLScalarLiteralParser:\n def default_literal_parser(ast):\n return value_parser(ast.value)\n\n return default_literal_parser\n", "path": "ariadne/scalars.py"}]} | 1,080 | 310 |
gh_patches_debug_20645 | rasdani/github-patches | git_diff | Flexget__Flexget-1101 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problem with sabnzbd after upgrade to version 2.0.5
Hi,
Last night I upgraded to version 2.0.5 from 1.2.521. I haven't made any config changes. Everything seems to work except adding downloads to sabnzbd. Reverting back to version 1.2.521 made everything work again.
```
2016-04-27 07:30 CRITICAL sabnzbd usenet Failed to use sabnzbd. Requested http://sabnzbd:8080/sabnzbd/api?nzbname=REL_NAME&apikey=11111&mode=addurl&name=URL_THAT_WORKS
2016-04-27 07:30 CRITICAL sabnzbd usenet Result was: 'Task' object has no attribute 'get'
2016-04-27 07:30 ERROR entry usenet Failed REL_NAME (sabnzbd unreachable)
```
Manually clicking the url does add the nzb to sabznbd.
This runs in a FreeBSD 10.3 jail using Python 2.7.11 installed and upgraded using pip.
--- END ISSUE ---
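For reference, the `'Task' object has no attribute 'get'` message suggests the plugin is still calling `get()` directly on the task object; below is a hedged sketch of the FlexGet 2.x-style call, using the requests session attached to the task and catching the requests exception. The helper function is illustrative, not FlexGet's actual plugin code.

```python
# Sketch of fetching the SABnzbd add-URL endpoint via the task's requests session.
from requests import RequestException


def add_nzb_url(task, entry, request_url, log):
    try:
        # FlexGet 2.x exposes a requests.Session on the task object;
        # the old ``task.get(...)`` shortcut is what raises AttributeError.
        response = task.requests.get(request_url)
    except RequestException as exc:
        log.critical('Failed to use sabnzbd. Requested %s' % request_url)
        log.critical('Result was: %s' % exc)
        entry.fail('sabnzbd unreachable')
        return None
    return response
```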
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/output/sabnzbd.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import *
3 from future.moves.urllib.parse import urlencode
4
5 import logging
6
7 from flexget import plugin
8 from flexget.event import event
9
10 log = logging.getLogger('sabnzbd')
11
12
13 class OutputSabnzbd(object):
14 """
15 Example::
16
17 sabnzbd:
18 apikey: 123456
19 url: http://localhost/sabnzbd/api?
20 category: movies
21
22 All parameters::
23
24 sabnzbd:
25 apikey: ...
26 url: ...
27 category: ...
28 script: ...
29 pp: ...
30 priority: ...
31 """
32 schema = {
33 'type': 'object',
34 'properties': {
35 'key': {'type': 'string'},
36 'url': {'type': 'string', 'format': 'url'},
37 'category': {'type': 'string'},
38 'script': {'type': 'string'},
39 'pp': {'type': 'string'},
40 'priority': {'type': 'integer'},
41 'password': {'type': 'string'},
42 'username': {'type': 'string'},
43 },
44 'required': ['key', 'url'],
45 'additionalProperties': False,
46 }
47
48 def get_params(self, config):
49 params = {}
50 if 'key' in config:
51 params['apikey'] = config['key']
52 if 'category' in config:
53 params['cat'] = '%s' % config['category']
54 if 'script' in config:
55 params['script'] = config['script']
56 if 'pp' in config:
57 params['pp'] = config['pp']
58 if 'priority' in config:
59 params['priority'] = config['priority']
60 if 'username' in config:
61 params['ma_username'] = config['username']
62 if 'password' in config:
63 params['ma_password'] = config['password']
64 params['mode'] = 'addurl'
65 return params
66
67 def on_task_output(self, task, config):
68 for entry in task.accepted:
69 if task.options.test:
70 log.info('Would add into sabnzbd: %s' % entry['title'])
71 continue
72
73 params = self.get_params(config)
74 # allow overriding the category
75 if 'category' in entry:
76 # Dirty hack over the next few lines to strip out non-ascii
77 # chars. We're going to urlencode this, which causes
78 # serious issues in python2.x if it's not ascii input.
79 params['cat'] = ''.join([x for x in entry['category'] if ord(x) < 128])
80 params['name'] = ''.join([x for x in entry['url'] if ord(x) < 128])
81 # add cleaner nzb name (undocumented api feature)
82 params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])
83
84 request_url = config['url'] + urlencode(params)
85 log.debug('request_url: %s' % request_url)
86 try:
87 response = task.get(request_url)
88 except Exception as e:
89 log.critical('Failed to use sabnzbd. Requested %s' % request_url)
90 log.critical('Result was: %s' % e)
91 entry.fail('sabnzbd unreachable')
92 if task.options.debug:
93 log.exception(e)
94 continue
95
96 if 'error' in response.text.lower():
97 entry.fail(response.text.replace('\n', ''))
98 else:
99 log.info('Added `%s` to SABnzbd' % (entry['title']))
100
101
102 @event('plugin.register')
103 def register_plugin():
104 plugin.register(OutputSabnzbd, 'sabnzbd', api_ver=2)
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/output/sabnzbd.py b/flexget/plugins/output/sabnzbd.py
--- a/flexget/plugins/output/sabnzbd.py
+++ b/flexget/plugins/output/sabnzbd.py
@@ -6,6 +6,7 @@
from flexget import plugin
from flexget.event import event
+from requests import RequestException
log = logging.getLogger('sabnzbd')
@@ -84,10 +85,10 @@
request_url = config['url'] + urlencode(params)
log.debug('request_url: %s' % request_url)
try:
- response = task.get(request_url)
- except Exception as e:
+ response = task.requests.get(request_url)
+ except RequestException as e:
log.critical('Failed to use sabnzbd. Requested %s' % request_url)
- log.critical('Result was: %s' % e)
+ log.critical('Result was: %s' % e.args[0])
entry.fail('sabnzbd unreachable')
if task.options.debug:
log.exception(e)
| {"golden_diff": "diff --git a/flexget/plugins/output/sabnzbd.py b/flexget/plugins/output/sabnzbd.py\n--- a/flexget/plugins/output/sabnzbd.py\n+++ b/flexget/plugins/output/sabnzbd.py\n@@ -6,6 +6,7 @@\n \n from flexget import plugin\n from flexget.event import event\n+from requests import RequestException\n \n log = logging.getLogger('sabnzbd')\n \n@@ -84,10 +85,10 @@\n request_url = config['url'] + urlencode(params)\n log.debug('request_url: %s' % request_url)\n try:\n- response = task.get(request_url)\n- except Exception as e:\n+ response = task.requests.get(request_url)\n+ except RequestException as e:\n log.critical('Failed to use sabnzbd. Requested %s' % request_url)\n- log.critical('Result was: %s' % e)\n+ log.critical('Result was: %s' % e.args[0])\n entry.fail('sabnzbd unreachable')\n if task.options.debug:\n log.exception(e)\n", "issue": "Problem with sabnzbd after upgrade to version 2.0.5\nHi,\n\nLast night I upgraded to version 2.0.5 from 1.2.521. I haven't made any config changes. Everything seems to work except adding downloads to sabnzbd. Reverting back to version 1.2.521 made everything work again.\n\n```\n2016-04-27 07:30 CRITICAL sabnzbd usenet Failed to use sabnzbd. Requested http://sabnzbd:8080/sabnzbd/api?nzbname=REL_NAME&apikey=11111&mode=addurl&name=URL_THAT_WORKS\n2016-04-27 07:30 CRITICAL sabnzbd usenet Result was: 'Task' object has no attribute 'get'\n2016-04-27 07:30 ERROR entry usenet Failed REL_NAME (sabnzbd unreachable) \n```\n\nManually clicking the url does add the nzb to sabznbd. \n\nThis runs in a FreeBSD 10.3 jail using Python 2.7.11 installed and upgraded using pip.\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import *\nfrom future.moves.urllib.parse import urlencode\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\n\nlog = logging.getLogger('sabnzbd')\n\n\nclass OutputSabnzbd(object):\n \"\"\"\n Example::\n\n sabnzbd:\n apikey: 123456\n url: http://localhost/sabnzbd/api?\n category: movies\n\n All parameters::\n\n sabnzbd:\n apikey: ...\n url: ...\n category: ...\n script: ...\n pp: ...\n priority: ...\n \"\"\"\n schema = {\n 'type': 'object',\n 'properties': {\n 'key': {'type': 'string'},\n 'url': {'type': 'string', 'format': 'url'},\n 'category': {'type': 'string'},\n 'script': {'type': 'string'},\n 'pp': {'type': 'string'},\n 'priority': {'type': 'integer'},\n 'password': {'type': 'string'},\n 'username': {'type': 'string'},\n },\n 'required': ['key', 'url'],\n 'additionalProperties': False,\n }\n\n def get_params(self, config):\n params = {}\n if 'key' in config:\n params['apikey'] = config['key']\n if 'category' in config:\n params['cat'] = '%s' % config['category']\n if 'script' in config:\n params['script'] = config['script']\n if 'pp' in config:\n params['pp'] = config['pp']\n if 'priority' in config:\n params['priority'] = config['priority']\n if 'username' in config:\n params['ma_username'] = config['username']\n if 'password' in config:\n params['ma_password'] = config['password']\n params['mode'] = 'addurl'\n return params\n\n def on_task_output(self, task, config):\n for entry in task.accepted:\n if task.options.test:\n log.info('Would add into sabnzbd: %s' % entry['title'])\n continue\n\n params = self.get_params(config)\n # allow overriding the category\n if 'category' in entry:\n # Dirty hack over the next few lines to strip out non-ascii\n # chars. 
We're going to urlencode this, which causes\n # serious issues in python2.x if it's not ascii input.\n params['cat'] = ''.join([x for x in entry['category'] if ord(x) < 128])\n params['name'] = ''.join([x for x in entry['url'] if ord(x) < 128])\n # add cleaner nzb name (undocumented api feature)\n params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])\n\n request_url = config['url'] + urlencode(params)\n log.debug('request_url: %s' % request_url)\n try:\n response = task.get(request_url)\n except Exception as e:\n log.critical('Failed to use sabnzbd. Requested %s' % request_url)\n log.critical('Result was: %s' % e)\n entry.fail('sabnzbd unreachable')\n if task.options.debug:\n log.exception(e)\n continue\n\n if 'error' in response.text.lower():\n entry.fail(response.text.replace('\\n', ''))\n else:\n log.info('Added `%s` to SABnzbd' % (entry['title']))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(OutputSabnzbd, 'sabnzbd', api_ver=2)\n", "path": "flexget/plugins/output/sabnzbd.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import *\nfrom future.moves.urllib.parse import urlencode\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom requests import RequestException\n\nlog = logging.getLogger('sabnzbd')\n\n\nclass OutputSabnzbd(object):\n \"\"\"\n Example::\n\n sabnzbd:\n apikey: 123456\n url: http://localhost/sabnzbd/api?\n category: movies\n\n All parameters::\n\n sabnzbd:\n apikey: ...\n url: ...\n category: ...\n script: ...\n pp: ...\n priority: ...\n \"\"\"\n schema = {\n 'type': 'object',\n 'properties': {\n 'key': {'type': 'string'},\n 'url': {'type': 'string', 'format': 'url'},\n 'category': {'type': 'string'},\n 'script': {'type': 'string'},\n 'pp': {'type': 'string'},\n 'priority': {'type': 'integer'},\n 'password': {'type': 'string'},\n 'username': {'type': 'string'},\n },\n 'required': ['key', 'url'],\n 'additionalProperties': False,\n }\n\n def get_params(self, config):\n params = {}\n if 'key' in config:\n params['apikey'] = config['key']\n if 'category' in config:\n params['cat'] = '%s' % config['category']\n if 'script' in config:\n params['script'] = config['script']\n if 'pp' in config:\n params['pp'] = config['pp']\n if 'priority' in config:\n params['priority'] = config['priority']\n if 'username' in config:\n params['ma_username'] = config['username']\n if 'password' in config:\n params['ma_password'] = config['password']\n params['mode'] = 'addurl'\n return params\n\n def on_task_output(self, task, config):\n for entry in task.accepted:\n if task.options.test:\n log.info('Would add into sabnzbd: %s' % entry['title'])\n continue\n\n params = self.get_params(config)\n # allow overriding the category\n if 'category' in entry:\n # Dirty hack over the next few lines to strip out non-ascii\n # chars. We're going to urlencode this, which causes\n # serious issues in python2.x if it's not ascii input.\n params['cat'] = ''.join([x for x in entry['category'] if ord(x) < 128])\n params['name'] = ''.join([x for x in entry['url'] if ord(x) < 128])\n # add cleaner nzb name (undocumented api feature)\n params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])\n\n request_url = config['url'] + urlencode(params)\n log.debug('request_url: %s' % request_url)\n try:\n response = task.requests.get(request_url)\n except RequestException as e:\n log.critical('Failed to use sabnzbd. 
Requested %s' % request_url)\n log.critical('Result was: %s' % e.args[0])\n entry.fail('sabnzbd unreachable')\n if task.options.debug:\n log.exception(e)\n continue\n\n if 'error' in response.text.lower():\n entry.fail(response.text.replace('\\n', ''))\n else:\n log.info('Added `%s` to SABnzbd' % (entry['title']))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(OutputSabnzbd, 'sabnzbd', api_ver=2)\n", "path": "flexget/plugins/output/sabnzbd.py"}]} | 1,560 | 246 |
gh_patches_debug_5867 | rasdani/github-patches | git_diff | napari__napari-3424 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`normalize_dtype` excludes big endian types
## 🐛 Bug
```py
In [457]: from napari.utils._dtype import get_dtype_limits
In [458]: get_dtype_limits(np.dtype('<u2'))
Out[458]: (0, 65535)
In [459]: get_dtype_limits(np.dtype('>u2'))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-459-d109d903c3cf> in <module>
----> 1 get_dtype_limits(np.dtype('>u2'))
~/Dropbox (HMS)/Python/forks/napari/napari/utils/_dtype.py in get_dtype_limits(dtype_spec)
103 info = np.finfo(dtype)
104 else:
--> 105 raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')
106 return info.min, info.max
TypeError: Unrecognized or non-numeric dtype: >u2
In [460]: np.iinfo('>u2')
Out[460]: iinfo(min=0, max=65535, dtype=>u2)
```
--- END ISSUE ---
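The traceback boils down to byte-order-qualified dtype strings like `'>u2'` never matching the name-based normalization, even though NumPy itself handles them. A small sketch of the expected behaviour, letting such specs fall through to `np.dtype` / `np.iinfo`, follows; it illustrates the report rather than napari's actual helper.

```python
import numpy as np


def dtype_limits(dtype_spec):
    """Sketch: return (min, max) for a dtype spec, including '>u2' / '<u2'."""
    dtype = np.dtype(dtype_spec)  # byte order does not change the dtype kind
    if np.issubdtype(dtype, np.integer):
        info = np.iinfo(dtype)
    elif np.issubdtype(dtype, np.floating):
        info = np.finfo(dtype)
    else:
        raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')
    return info.min, info.max


assert dtype_limits('>u2') == (0, 65535)
assert dtype_limits('<u2') == (0, 65535)
```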
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/utils/_dtype.py`
Content:
```
1 from typing import Tuple, Union
2
3 import numpy as np
4
5 _np_uints = {
6 8: np.uint8,
7 16: np.uint16,
8 32: np.uint32,
9 64: np.uint64,
10 }
11
12 _np_ints = {
13 8: np.int8,
14 16: np.int16,
15 32: np.int32,
16 64: np.int64,
17 }
18
19 _np_floats = {
20 32: np.float32,
21 64: np.float64,
22 }
23
24 _np_complex = {
25 64: np.complex64,
26 128: np.complex128,
27 }
28
29 _np_kinds = {
30 'uint': _np_uints,
31 'int': _np_ints,
32 'float': _np_floats,
33 'complex': _np_complex,
34 }
35
36
37 def _normalize_str_by_bit_depth(dtype_str, kind):
38 if not any(str.isdigit(c) for c in dtype_str): # Python 'int' or 'float'
39 return np.dtype(kind).type
40 bit_dict = _np_kinds[kind]
41 if '128' in dtype_str:
42 return bit_dict[128]
43 if '8' in dtype_str:
44 return bit_dict[8]
45 if '16' in dtype_str:
46 return bit_dict[16]
47 if '32' in dtype_str:
48 return bit_dict[32]
49 if '64' in dtype_str:
50 return bit_dict[64]
51
52
53 def normalize_dtype(dtype_spec):
54 """Return a proper NumPy type given ~any duck array dtype.
55
56 Parameters
57 ----------
58 dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc
59 A type that can be interpreted as a NumPy numeric data type, e.g.
60 'uint32', np.uint8, torch.float32, etc.
61
62 Returns
63 -------
64 dtype : numpy.dtype
65 The corresponding dtype.
66
67 Notes
68 -----
69 half-precision floats are not supported.
70 """
71 dtype_str = str(dtype_spec)
72 if 'uint' in dtype_str:
73 return _normalize_str_by_bit_depth(dtype_str, 'uint')
74 if 'int' in dtype_str:
75 return _normalize_str_by_bit_depth(dtype_str, 'int')
76 if 'float' in dtype_str:
77 return _normalize_str_by_bit_depth(dtype_str, 'float')
78 if 'complex' in dtype_str:
79 return _normalize_str_by_bit_depth(dtype_str, 'complex')
80 if 'bool' in dtype_str:
81 return np.bool_
82
83
84 def get_dtype_limits(dtype_spec) -> Tuple[float, float]:
85 """Return machine limits for numeric types.
86
87 Parameters
88 ----------
89 dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc
90 A type that can be interpreted as a NumPy numeric data type, e.g.
91 'uint32', np.uint8, torch.float32, etc.
92
93 Returns
94 -------
95 limits : tuple
96 The smallest/largest numbers expressible by the type.
97 """
98 dtype = normalize_dtype(dtype_spec)
99 info: Union[np.iinfo, np.finfo]
100 if np.issubdtype(dtype, np.integer):
101 info = np.iinfo(dtype)
102 elif dtype and np.issubdtype(dtype, np.floating):
103 info = np.finfo(dtype)
104 else:
105 raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')
106 return info.min, info.max
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/utils/_dtype.py b/napari/utils/_dtype.py
--- a/napari/utils/_dtype.py
+++ b/napari/utils/_dtype.py
@@ -79,6 +79,11 @@
return _normalize_str_by_bit_depth(dtype_str, 'complex')
if 'bool' in dtype_str:
return np.bool_
+ # If we don't find one of the named dtypes, return the dtype_spec
+ # unchanged. This allows NumPy big endian types to work. See
+ # https://github.com/napari/napari/issues/3421
+ else:
+ return dtype_spec
def get_dtype_limits(dtype_spec) -> Tuple[float, float]:
| {"golden_diff": "diff --git a/napari/utils/_dtype.py b/napari/utils/_dtype.py\n--- a/napari/utils/_dtype.py\n+++ b/napari/utils/_dtype.py\n@@ -79,6 +79,11 @@\n return _normalize_str_by_bit_depth(dtype_str, 'complex')\n if 'bool' in dtype_str:\n return np.bool_\n+ # If we don't find one of the named dtypes, return the dtype_spec\n+ # unchanged. This allows NumPy big endian types to work. See\n+ # https://github.com/napari/napari/issues/3421\n+ else:\n+ return dtype_spec\n \n \n def get_dtype_limits(dtype_spec) -> Tuple[float, float]:\n", "issue": "`normalize_dtype` excludes big endian types\n## \ud83d\udc1b Bug\r\n```py\r\nIn [457]: from napari.utils._dtype import get_dtype_limits\r\n\r\nIn [458]: get_dtype_limits(np.dtype('<u2'))\r\nOut[458]: (0, 65535)\r\n\r\nIn [459]: get_dtype_limits(np.dtype('>u2'))\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-459-d109d903c3cf> in <module>\r\n----> 1 get_dtype_limits(np.dtype('>u2'))\r\n\r\n~/Dropbox (HMS)/Python/forks/napari/napari/utils/_dtype.py in get_dtype_limits(dtype_spec)\r\n 103 info = np.finfo(dtype)\r\n 104 else:\r\n--> 105 raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')\r\n 106 return info.min, info.max\r\n\r\nTypeError: Unrecognized or non-numeric dtype: >u2\r\n\r\nIn [460]: np.iinfo('>u2')\r\nOut[460]: iinfo(min=0, max=65535, dtype=>u2)\r\n```\n", "before_files": [{"content": "from typing import Tuple, Union\n\nimport numpy as np\n\n_np_uints = {\n 8: np.uint8,\n 16: np.uint16,\n 32: np.uint32,\n 64: np.uint64,\n}\n\n_np_ints = {\n 8: np.int8,\n 16: np.int16,\n 32: np.int32,\n 64: np.int64,\n}\n\n_np_floats = {\n 32: np.float32,\n 64: np.float64,\n}\n\n_np_complex = {\n 64: np.complex64,\n 128: np.complex128,\n}\n\n_np_kinds = {\n 'uint': _np_uints,\n 'int': _np_ints,\n 'float': _np_floats,\n 'complex': _np_complex,\n}\n\n\ndef _normalize_str_by_bit_depth(dtype_str, kind):\n if not any(str.isdigit(c) for c in dtype_str): # Python 'int' or 'float'\n return np.dtype(kind).type\n bit_dict = _np_kinds[kind]\n if '128' in dtype_str:\n return bit_dict[128]\n if '8' in dtype_str:\n return bit_dict[8]\n if '16' in dtype_str:\n return bit_dict[16]\n if '32' in dtype_str:\n return bit_dict[32]\n if '64' in dtype_str:\n return bit_dict[64]\n\n\ndef normalize_dtype(dtype_spec):\n \"\"\"Return a proper NumPy type given ~any duck array dtype.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n dtype : numpy.dtype\n The corresponding dtype.\n\n Notes\n -----\n half-precision floats are not supported.\n \"\"\"\n dtype_str = str(dtype_spec)\n if 'uint' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'uint')\n if 'int' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'int')\n if 'float' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'float')\n if 'complex' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'complex')\n if 'bool' in dtype_str:\n return np.bool_\n\n\ndef get_dtype_limits(dtype_spec) -> Tuple[float, float]:\n \"\"\"Return machine limits for numeric types.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n 
limits : tuple\n The smallest/largest numbers expressible by the type.\n \"\"\"\n dtype = normalize_dtype(dtype_spec)\n info: Union[np.iinfo, np.finfo]\n if np.issubdtype(dtype, np.integer):\n info = np.iinfo(dtype)\n elif dtype and np.issubdtype(dtype, np.floating):\n info = np.finfo(dtype)\n else:\n raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')\n return info.min, info.max\n", "path": "napari/utils/_dtype.py"}], "after_files": [{"content": "from typing import Tuple, Union\n\nimport numpy as np\n\n_np_uints = {\n 8: np.uint8,\n 16: np.uint16,\n 32: np.uint32,\n 64: np.uint64,\n}\n\n_np_ints = {\n 8: np.int8,\n 16: np.int16,\n 32: np.int32,\n 64: np.int64,\n}\n\n_np_floats = {\n 32: np.float32,\n 64: np.float64,\n}\n\n_np_complex = {\n 64: np.complex64,\n 128: np.complex128,\n}\n\n_np_kinds = {\n 'uint': _np_uints,\n 'int': _np_ints,\n 'float': _np_floats,\n 'complex': _np_complex,\n}\n\n\ndef _normalize_str_by_bit_depth(dtype_str, kind):\n if not any(str.isdigit(c) for c in dtype_str): # Python 'int' or 'float'\n return np.dtype(kind).type\n bit_dict = _np_kinds[kind]\n if '128' in dtype_str:\n return bit_dict[128]\n if '8' in dtype_str:\n return bit_dict[8]\n if '16' in dtype_str:\n return bit_dict[16]\n if '32' in dtype_str:\n return bit_dict[32]\n if '64' in dtype_str:\n return bit_dict[64]\n\n\ndef normalize_dtype(dtype_spec):\n \"\"\"Return a proper NumPy type given ~any duck array dtype.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n dtype : numpy.dtype\n The corresponding dtype.\n\n Notes\n -----\n half-precision floats are not supported.\n \"\"\"\n dtype_str = str(dtype_spec)\n if 'uint' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'uint')\n if 'int' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'int')\n if 'float' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'float')\n if 'complex' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'complex')\n if 'bool' in dtype_str:\n return np.bool_\n # If we don't find one of the named dtypes, return the dtype_spec\n # unchanged. This allows NumPy big endian types to work. See\n # https://github.com/napari/napari/issues/3421\n else:\n return dtype_spec\n\n\ndef get_dtype_limits(dtype_spec) -> Tuple[float, float]:\n \"\"\"Return machine limits for numeric types.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n limits : tuple\n The smallest/largest numbers expressible by the type.\n \"\"\"\n dtype = normalize_dtype(dtype_spec)\n info: Union[np.iinfo, np.finfo]\n if np.issubdtype(dtype, np.integer):\n info = np.iinfo(dtype)\n elif dtype and np.issubdtype(dtype, np.floating):\n info = np.finfo(dtype)\n else:\n raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')\n return info.min, info.max\n", "path": "napari/utils/_dtype.py"}]} | 1,527 | 162 |
gh_patches_debug_40002 | rasdani/github-patches | git_diff | carpentries__amy-2211 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Instructor Selection: Additional filter/sort options for Admin view
On the Instructor Selection [admin view page](https://test-amy.carpentries.org/recruitment/processes/), the admin user can filter by assigned to and by status (open/closed).
We would like to see the following additional options:
* Filter by Online/in-person
* Sort by Priority ascending and descending
* Sort by date ascending and descending
* Filter by curriculum
* Filter by country
--- END ISSUE ---
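To sketch what these requests could look like with django-filter: field lookups such as `event__tags__name` and `calculated_priority` are taken from the project code shown below, while the class itself is illustrative rather than the final implementation. Curriculum and country filters would follow the same pattern with `ModelMultipleChoiceFilter` and a country filter.

```python
import django_filters as filters


class InstructorRecruitmentFilterSketch(filters.FilterSet):
    # Online / in-person, implemented as a method filter on the event's tags.
    online_inperson = filters.ChoiceFilter(
        choices=(("online", "Online only"), ("inperson", "Inperson only")),
        empty_label="Any",
        label="Online/inperson",
        method="filter_online_inperson",
    )

    # Priority and event-date ordering, each available ascending and descending.
    order_by = filters.OrderingFilter(
        fields=(("calculated_priority", "priority"), ("event__start", "event_start")),
    )

    def filter_online_inperson(self, queryset, name, value):
        if value == "online":
            return queryset.filter(event__tags__name="online")
        if value == "inperson":
            return queryset.exclude(event__tags__name="online")
        return queryset
```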
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/recruitment/filters.py`
Content:
```
1 import django_filters
2
3 from workshops.fields import ModelSelect2Widget
4 from workshops.filters import AMYFilterSet
5 from workshops.forms import SELECT2_SIDEBAR
6 from workshops.models import Person
7
8 from .models import InstructorRecruitment
9
10
11 class InstructorRecruitmentFilter(AMYFilterSet):
12 assigned_to = django_filters.ModelChoiceFilter(
13 queryset=Person.objects.all(),
14 widget=ModelSelect2Widget(data_view="admin-lookup", attrs=SELECT2_SIDEBAR),
15 )
16
17 class Meta:
18 model = InstructorRecruitment
19 fields = [
20 "assigned_to",
21 "status",
22 ]
23
```
Path: `amy/dashboard/filters.py`
Content:
```
1 from django.db.models import F, QuerySet
2 from django.forms import widgets
3 import django_filters as filters
4
5 from recruitment.models import InstructorRecruitment
6 from workshops.filters import AMYFilterSet
7
8
9 class UpcomingTeachingOpportunitiesFilter(AMYFilterSet):
10 status = filters.ChoiceFilter(
11 choices=(
12 ("online", "Online only"),
13 ("inperson", "Inperson only"),
14 ),
15 empty_label="Any",
16 label="Online/inperson",
17 method="filter_status",
18 )
19
20 only_applied_to = filters.BooleanFilter(
21 label="Show only workshops I have applied to",
22 method="filter_application_only",
23 widget=widgets.CheckboxInput,
24 )
25
26 order_by = filters.OrderingFilter(
27 fields=("event__start",),
28 choices=(
29 ("-calculated_priority", "Priority"),
30 ("event__start", "Event start"),
31 ("-event__start", "Event start (descending)"),
32 ("proximity", "Closer to my airport"),
33 ("-proximity", "Further away from my airport"),
34 ),
35 method="filter_order_by",
36 )
37
38 class Meta:
39 model = InstructorRecruitment
40 fields = [
41 "status",
42 ]
43
44 def filter_status(self, queryset: QuerySet, name: str, value: str) -> QuerySet:
45 """Filter recruitments based on the event (online/inperson) status."""
46 if value == "online":
47 return queryset.filter(event__tags__name="online")
48 elif value == "inperson":
49 return queryset.exclude(event__tags__name="online")
50 else:
51 return queryset
52
53 def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:
54 """Order entries by proximity to user's airport."""
55 try:
56 latitude: float = self.request.user.airport.latitude
57 except AttributeError:
58 latitude = 0.0
59
60 try:
61 longitude: float = self.request.user.airport.longitude
62 except AttributeError:
63 longitude = 0.0
64
65 # `0.0` is neutral element for this equation, so even if user doesn't have the
66 # airport specified, the sorting should still work
67 distance = (F("event__latitude") - latitude) ** 2.0 + (
68 F("event__longitude") - longitude
69 ) ** 2.0
70
71 if values == ["proximity"]:
72 return queryset.annotate(distance=distance).order_by("distance")
73 elif values == ["-proximity"]:
74 return queryset.annotate(distance=distance).order_by("-distance")
75 else:
76 return queryset.order_by(*values)
77
78 def filter_application_only(
79 self, queryset: QuerySet, name: str, value: bool
80 ) -> QuerySet:
81 if value:
82 return queryset.filter(signups__person=self.request.user)
83
84 return queryset
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/amy/dashboard/filters.py b/amy/dashboard/filters.py
--- a/amy/dashboard/filters.py
+++ b/amy/dashboard/filters.py
@@ -3,7 +3,9 @@
import django_filters as filters
from recruitment.models import InstructorRecruitment
-from workshops.filters import AMYFilterSet
+from workshops.fields import Select2MultipleWidget
+from workshops.filters import AllCountriesMultipleFilter, AMYFilterSet
+from workshops.models import Curriculum
class UpcomingTeachingOpportunitiesFilter(AMYFilterSet):
@@ -23,6 +25,17 @@
widget=widgets.CheckboxInput,
)
+ country = AllCountriesMultipleFilter(
+ field_name="event__country", widget=Select2MultipleWidget
+ )
+
+ curricula = filters.ModelMultipleChoiceFilter(
+ field_name="event__curricula",
+ queryset=Curriculum.objects.all(),
+ label="Curriculum",
+ widget=Select2MultipleWidget(),
+ )
+
order_by = filters.OrderingFilter(
fields=("event__start",),
choices=(
diff --git a/amy/recruitment/filters.py b/amy/recruitment/filters.py
--- a/amy/recruitment/filters.py
+++ b/amy/recruitment/filters.py
@@ -1,22 +1,68 @@
-import django_filters
+from django.db.models import QuerySet
+import django_filters as filters
-from workshops.fields import ModelSelect2Widget
-from workshops.filters import AMYFilterSet
+from workshops.fields import ModelSelect2Widget, Select2MultipleWidget
+from workshops.filters import AllCountriesMultipleFilter, AMYFilterSet
from workshops.forms import SELECT2_SIDEBAR
-from workshops.models import Person
+from workshops.models import Curriculum, Person
from .models import InstructorRecruitment
class InstructorRecruitmentFilter(AMYFilterSet):
- assigned_to = django_filters.ModelChoiceFilter(
+ assigned_to = filters.ModelChoiceFilter(
queryset=Person.objects.all(),
widget=ModelSelect2Widget(data_view="admin-lookup", attrs=SELECT2_SIDEBAR),
)
+ online_inperson = filters.ChoiceFilter(
+ choices=(
+ ("online", "Online only"),
+ ("inperson", "Inperson only"),
+ ),
+ empty_label="Any",
+ label="Online/inperson",
+ method="filter_online_inperson",
+ )
+
+ country = AllCountriesMultipleFilter(
+ field_name="event__country", widget=Select2MultipleWidget
+ )
+
+ curricula = filters.ModelMultipleChoiceFilter(
+ field_name="event__curricula",
+ queryset=Curriculum.objects.all(),
+ label="Curriculum",
+ widget=Select2MultipleWidget(),
+ )
+
+ order_by = filters.OrderingFilter(
+ fields=("event__start",),
+ choices=(
+ ("-calculated_priority", "Priority"),
+ ("event__start", "Event start"),
+ ("-event__start", "Event start (descending)"),
+ ),
+ method="filter_order_by",
+ )
+
class Meta:
model = InstructorRecruitment
fields = [
"assigned_to",
"status",
]
+
+ def filter_online_inperson(
+ self, queryset: QuerySet, name: str, value: str
+ ) -> QuerySet:
+ """Filter recruitments based on the event (online/inperson) status."""
+ if value == "online":
+ return queryset.filter(event__tags__name="online")
+ elif value == "inperson":
+ return queryset.exclude(event__tags__name="online")
+ else:
+ return queryset
+
+ def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:
+ return queryset.order_by(*values)
| {"golden_diff": "diff --git a/amy/dashboard/filters.py b/amy/dashboard/filters.py\n--- a/amy/dashboard/filters.py\n+++ b/amy/dashboard/filters.py\n@@ -3,7 +3,9 @@\n import django_filters as filters\n \n from recruitment.models import InstructorRecruitment\n-from workshops.filters import AMYFilterSet\n+from workshops.fields import Select2MultipleWidget\n+from workshops.filters import AllCountriesMultipleFilter, AMYFilterSet\n+from workshops.models import Curriculum\n \n \n class UpcomingTeachingOpportunitiesFilter(AMYFilterSet):\n@@ -23,6 +25,17 @@\n widget=widgets.CheckboxInput,\n )\n \n+ country = AllCountriesMultipleFilter(\n+ field_name=\"event__country\", widget=Select2MultipleWidget\n+ )\n+\n+ curricula = filters.ModelMultipleChoiceFilter(\n+ field_name=\"event__curricula\",\n+ queryset=Curriculum.objects.all(),\n+ label=\"Curriculum\",\n+ widget=Select2MultipleWidget(),\n+ )\n+\n order_by = filters.OrderingFilter(\n fields=(\"event__start\",),\n choices=(\ndiff --git a/amy/recruitment/filters.py b/amy/recruitment/filters.py\n--- a/amy/recruitment/filters.py\n+++ b/amy/recruitment/filters.py\n@@ -1,22 +1,68 @@\n-import django_filters\n+from django.db.models import QuerySet\n+import django_filters as filters\n \n-from workshops.fields import ModelSelect2Widget\n-from workshops.filters import AMYFilterSet\n+from workshops.fields import ModelSelect2Widget, Select2MultipleWidget\n+from workshops.filters import AllCountriesMultipleFilter, AMYFilterSet\n from workshops.forms import SELECT2_SIDEBAR\n-from workshops.models import Person\n+from workshops.models import Curriculum, Person\n \n from .models import InstructorRecruitment\n \n \n class InstructorRecruitmentFilter(AMYFilterSet):\n- assigned_to = django_filters.ModelChoiceFilter(\n+ assigned_to = filters.ModelChoiceFilter(\n queryset=Person.objects.all(),\n widget=ModelSelect2Widget(data_view=\"admin-lookup\", attrs=SELECT2_SIDEBAR),\n )\n \n+ online_inperson = filters.ChoiceFilter(\n+ choices=(\n+ (\"online\", \"Online only\"),\n+ (\"inperson\", \"Inperson only\"),\n+ ),\n+ empty_label=\"Any\",\n+ label=\"Online/inperson\",\n+ method=\"filter_online_inperson\",\n+ )\n+\n+ country = AllCountriesMultipleFilter(\n+ field_name=\"event__country\", widget=Select2MultipleWidget\n+ )\n+\n+ curricula = filters.ModelMultipleChoiceFilter(\n+ field_name=\"event__curricula\",\n+ queryset=Curriculum.objects.all(),\n+ label=\"Curriculum\",\n+ widget=Select2MultipleWidget(),\n+ )\n+\n+ order_by = filters.OrderingFilter(\n+ fields=(\"event__start\",),\n+ choices=(\n+ (\"-calculated_priority\", \"Priority\"),\n+ (\"event__start\", \"Event start\"),\n+ (\"-event__start\", \"Event start (descending)\"),\n+ ),\n+ method=\"filter_order_by\",\n+ )\n+\n class Meta:\n model = InstructorRecruitment\n fields = [\n \"assigned_to\",\n \"status\",\n ]\n+\n+ def filter_online_inperson(\n+ self, queryset: QuerySet, name: str, value: str\n+ ) -> QuerySet:\n+ \"\"\"Filter recruitments based on the event (online/inperson) status.\"\"\"\n+ if value == \"online\":\n+ return queryset.filter(event__tags__name=\"online\")\n+ elif value == \"inperson\":\n+ return queryset.exclude(event__tags__name=\"online\")\n+ else:\n+ return queryset\n+\n+ def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:\n+ return queryset.order_by(*values)\n", "issue": "Instructor Selection: Additional filter/sort options for Admin view \nOn the Instructor Selection [admin view page](https://test-amy.carpentries.org/recruitment/processes/), the admin user can filter 
by assigned to and by status (open/closed).\r\n\r\nWe would like to see the following additional options:\r\n\r\n* Filter by Online/in-person\r\n* Sort by Priority ascending and descending\r\n* Sort by date ascending and descending \r\n* Filter by curriculum\r\n* Filter by country \r\n\n", "before_files": [{"content": "import django_filters\n\nfrom workshops.fields import ModelSelect2Widget\nfrom workshops.filters import AMYFilterSet\nfrom workshops.forms import SELECT2_SIDEBAR\nfrom workshops.models import Person\n\nfrom .models import InstructorRecruitment\n\n\nclass InstructorRecruitmentFilter(AMYFilterSet):\n assigned_to = django_filters.ModelChoiceFilter(\n queryset=Person.objects.all(),\n widget=ModelSelect2Widget(data_view=\"admin-lookup\", attrs=SELECT2_SIDEBAR),\n )\n\n class Meta:\n model = InstructorRecruitment\n fields = [\n \"assigned_to\",\n \"status\",\n ]\n", "path": "amy/recruitment/filters.py"}, {"content": "from django.db.models import F, QuerySet\nfrom django.forms import widgets\nimport django_filters as filters\n\nfrom recruitment.models import InstructorRecruitment\nfrom workshops.filters import AMYFilterSet\n\n\nclass UpcomingTeachingOpportunitiesFilter(AMYFilterSet):\n status = filters.ChoiceFilter(\n choices=(\n (\"online\", \"Online only\"),\n (\"inperson\", \"Inperson only\"),\n ),\n empty_label=\"Any\",\n label=\"Online/inperson\",\n method=\"filter_status\",\n )\n\n only_applied_to = filters.BooleanFilter(\n label=\"Show only workshops I have applied to\",\n method=\"filter_application_only\",\n widget=widgets.CheckboxInput,\n )\n\n order_by = filters.OrderingFilter(\n fields=(\"event__start\",),\n choices=(\n (\"-calculated_priority\", \"Priority\"),\n (\"event__start\", \"Event start\"),\n (\"-event__start\", \"Event start (descending)\"),\n (\"proximity\", \"Closer to my airport\"),\n (\"-proximity\", \"Further away from my airport\"),\n ),\n method=\"filter_order_by\",\n )\n\n class Meta:\n model = InstructorRecruitment\n fields = [\n \"status\",\n ]\n\n def filter_status(self, queryset: QuerySet, name: str, value: str) -> QuerySet:\n \"\"\"Filter recruitments based on the event (online/inperson) status.\"\"\"\n if value == \"online\":\n return queryset.filter(event__tags__name=\"online\")\n elif value == \"inperson\":\n return queryset.exclude(event__tags__name=\"online\")\n else:\n return queryset\n\n def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:\n \"\"\"Order entries by proximity to user's airport.\"\"\"\n try:\n latitude: float = self.request.user.airport.latitude\n except AttributeError:\n latitude = 0.0\n\n try:\n longitude: float = self.request.user.airport.longitude\n except AttributeError:\n longitude = 0.0\n\n # `0.0` is neutral element for this equation, so even if user doesn't have the\n # airport specified, the sorting should still work\n distance = (F(\"event__latitude\") - latitude) ** 2.0 + (\n F(\"event__longitude\") - longitude\n ) ** 2.0\n\n if values == [\"proximity\"]:\n return queryset.annotate(distance=distance).order_by(\"distance\")\n elif values == [\"-proximity\"]:\n return queryset.annotate(distance=distance).order_by(\"-distance\")\n else:\n return queryset.order_by(*values)\n\n def filter_application_only(\n self, queryset: QuerySet, name: str, value: bool\n ) -> QuerySet:\n if value:\n return queryset.filter(signups__person=self.request.user)\n\n return queryset\n", "path": "amy/dashboard/filters.py"}], "after_files": [{"content": "from django.db.models import QuerySet\nimport 
django_filters as filters\n\nfrom workshops.fields import ModelSelect2Widget, Select2MultipleWidget\nfrom workshops.filters import AllCountriesMultipleFilter, AMYFilterSet\nfrom workshops.forms import SELECT2_SIDEBAR\nfrom workshops.models import Curriculum, Person\n\nfrom .models import InstructorRecruitment\n\n\nclass InstructorRecruitmentFilter(AMYFilterSet):\n assigned_to = filters.ModelChoiceFilter(\n queryset=Person.objects.all(),\n widget=ModelSelect2Widget(data_view=\"admin-lookup\", attrs=SELECT2_SIDEBAR),\n )\n\n online_inperson = filters.ChoiceFilter(\n choices=(\n (\"online\", \"Online only\"),\n (\"inperson\", \"Inperson only\"),\n ),\n empty_label=\"Any\",\n label=\"Online/inperson\",\n method=\"filter_online_inperson\",\n )\n\n country = AllCountriesMultipleFilter(\n field_name=\"event__country\", widget=Select2MultipleWidget\n )\n\n curricula = filters.ModelMultipleChoiceFilter(\n field_name=\"event__curricula\",\n queryset=Curriculum.objects.all(),\n label=\"Curriculum\",\n widget=Select2MultipleWidget(),\n )\n\n order_by = filters.OrderingFilter(\n fields=(\"event__start\",),\n choices=(\n (\"-calculated_priority\", \"Priority\"),\n (\"event__start\", \"Event start\"),\n (\"-event__start\", \"Event start (descending)\"),\n ),\n method=\"filter_order_by\",\n )\n\n class Meta:\n model = InstructorRecruitment\n fields = [\n \"assigned_to\",\n \"status\",\n ]\n\n def filter_online_inperson(\n self, queryset: QuerySet, name: str, value: str\n ) -> QuerySet:\n \"\"\"Filter recruitments based on the event (online/inperson) status.\"\"\"\n if value == \"online\":\n return queryset.filter(event__tags__name=\"online\")\n elif value == \"inperson\":\n return queryset.exclude(event__tags__name=\"online\")\n else:\n return queryset\n\n def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:\n return queryset.order_by(*values)\n", "path": "amy/recruitment/filters.py"}, {"content": "from django.db.models import F, QuerySet\nfrom django.forms import widgets\nimport django_filters as filters\n\nfrom recruitment.models import InstructorRecruitment\nfrom workshops.fields import Select2MultipleWidget\nfrom workshops.filters import AllCountriesMultipleFilter, AMYFilterSet\nfrom workshops.models import Curriculum\n\n\nclass UpcomingTeachingOpportunitiesFilter(AMYFilterSet):\n status = filters.ChoiceFilter(\n choices=(\n (\"online\", \"Online only\"),\n (\"inperson\", \"Inperson only\"),\n ),\n empty_label=\"Any\",\n label=\"Online/inperson\",\n method=\"filter_status\",\n )\n\n only_applied_to = filters.BooleanFilter(\n label=\"Show only workshops I have applied to\",\n method=\"filter_application_only\",\n widget=widgets.CheckboxInput,\n )\n\n country = AllCountriesMultipleFilter(\n field_name=\"event__country\", widget=Select2MultipleWidget\n )\n\n curricula = filters.ModelMultipleChoiceFilter(\n field_name=\"event__curricula\",\n queryset=Curriculum.objects.all(),\n label=\"Curriculum\",\n widget=Select2MultipleWidget(),\n )\n\n order_by = filters.OrderingFilter(\n fields=(\"event__start\",),\n choices=(\n (\"-calculated_priority\", \"Priority\"),\n (\"event__start\", \"Event start\"),\n (\"-event__start\", \"Event start (descending)\"),\n (\"proximity\", \"Closer to my airport\"),\n (\"-proximity\", \"Further away from my airport\"),\n ),\n method=\"filter_order_by\",\n )\n\n class Meta:\n model = InstructorRecruitment\n fields = [\n \"status\",\n ]\n\n def filter_status(self, queryset: QuerySet, name: str, value: str) -> QuerySet:\n \"\"\"Filter recruitments 
based on the event (online/inperson) status.\"\"\"\n if value == \"online\":\n return queryset.filter(event__tags__name=\"online\")\n elif value == \"inperson\":\n return queryset.exclude(event__tags__name=\"online\")\n else:\n return queryset\n\n def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:\n \"\"\"Order entries by proximity to user's airport.\"\"\"\n try:\n latitude: float = self.request.user.airport.latitude\n except AttributeError:\n latitude = 0.0\n\n try:\n longitude: float = self.request.user.airport.longitude\n except AttributeError:\n longitude = 0.0\n\n # `0.0` is neutral element for this equation, so even if user doesn't have the\n # airport specified, the sorting should still work\n distance = (F(\"event__latitude\") - latitude) ** 2.0 + (\n F(\"event__longitude\") - longitude\n ) ** 2.0\n\n if values == [\"proximity\"]:\n return queryset.annotate(distance=distance).order_by(\"distance\")\n elif values == [\"-proximity\"]:\n return queryset.annotate(distance=distance).order_by(\"-distance\")\n else:\n return queryset.order_by(*values)\n\n def filter_application_only(\n self, queryset: QuerySet, name: str, value: bool\n ) -> QuerySet:\n if value:\n return queryset.filter(signups__person=self.request.user)\n\n return queryset\n", "path": "amy/dashboard/filters.py"}]} | 1,313 | 837 |
gh_patches_debug_16815 | rasdani/github-patches | git_diff | pypa__cibuildwheel-701 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changing the default branch to `main`
This is just a heads up, I'm planning to change the default branch on this repo to `main` this week, let's say Wednesday 26th. Github have a tool to change it over, and update PRs to target the new branch, but you might have to update it on local checkouts and forks. Shouldn't be a big issue though, this is what [Github say](https://github.com/github/renaming#renaming-existing-branches) about it:
> Renaming a branch will:
>
> - Re-target any open pull requests
> - Update any draft releases based on the branch
> - Move any branch protection rules that explicitly reference the old name
> - Update the branch used to build GitHub Pages, if applicable
> - Show a notice to repository contributors, maintainers, and admins on the repository homepage with instructions to update local copies of the repository
> - Show a notice to contributors who git push to the old branch
> - Redirect web requests for the old branch name to the new branch name
> - Return a "Moved Permanently" response in API requests for the old branch name
---
Checklist for the switch:
- [x] Use the Github tool to change it over
- [x] Find/replace `master` to `main` in CI configs, docs, scripts, example code, etc
- [x] Change default branch on Readthedocs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/make_dependency_update_pr.py`
Content:
```
1 #!/usr/bin/env python3
2
3 from __future__ import annotations
4
5 import os
6 import sys
7 import textwrap
8 import time
9 from pathlib import Path
10 from subprocess import run
11
12 import click
13
14
15 def shell(cmd, **kwargs):
16 return run([cmd], shell=True, **kwargs)
17
18
19 def git_repo_has_changes():
20 unstaged_changes = shell("git diff-index --quiet HEAD --").returncode != 0
21 staged_changes = shell("git diff-index --quiet --cached HEAD --").returncode != 0
22 return unstaged_changes or staged_changes
23
24
25 @click.command()
26 def main():
27 project_root = Path(__file__).parent / ".."
28 os.chdir(project_root)
29
30 if git_repo_has_changes():
31 print("Your git repo has uncommitted changes. Commit or stash before continuing.")
32 sys.exit(1)
33
34 previous_branch = shell(
35 "git rev-parse --abbrev-ref HEAD", check=True, capture_output=True, encoding="utf8"
36 ).stdout.strip()
37
38 shell("git fetch origin", check=True)
39
40 timestamp = time.strftime("%Y-%m-%dT%H-%M-%S", time.gmtime())
41 branch_name = f"update-constraints-{timestamp}"
42
43 shell(f"git checkout -b {branch_name} origin/master", check=True)
44
45 try:
46 shell("bin/update_dependencies.py", check=True)
47
48 if not git_repo_has_changes():
49 print("Done: no constraint updates required.")
50 return
51
52 shell('git commit -a -m "Update dependencies"', check=True)
53 body = textwrap.dedent(
54 f"""
55 Update the versions of our dependencies.
56
57 PR generated by `{os.path.basename(__file__)}`.
58 """
59 )
60 run(
61 [
62 "gh",
63 "pr",
64 "create",
65 "--repo=pypa/cibuildwheel",
66 "--base=master",
67 "--title=Update dependencies",
68 f"--body='{body}'",
69 ],
70 check=True,
71 )
72
73 print("Done.")
74 finally:
75 # remove any local changes
76 shell("git checkout -- .")
77 shell(f"git checkout {previous_branch}", check=True)
78 shell(f"git branch -D --force {branch_name}", check=True)
79
80
81 if __name__ == "__main__":
82 main.main(standalone_mode=True)
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bin/make_dependency_update_pr.py b/bin/make_dependency_update_pr.py
--- a/bin/make_dependency_update_pr.py
+++ b/bin/make_dependency_update_pr.py
@@ -40,7 +40,7 @@
timestamp = time.strftime("%Y-%m-%dT%H-%M-%S", time.gmtime())
branch_name = f"update-constraints-{timestamp}"
- shell(f"git checkout -b {branch_name} origin/master", check=True)
+ shell(f"git checkout -b {branch_name} origin/main", check=True)
try:
shell("bin/update_dependencies.py", check=True)
@@ -63,7 +63,7 @@
"pr",
"create",
"--repo=pypa/cibuildwheel",
- "--base=master",
+ "--base=main",
"--title=Update dependencies",
f"--body='{body}'",
],
| {"golden_diff": "diff --git a/bin/make_dependency_update_pr.py b/bin/make_dependency_update_pr.py\n--- a/bin/make_dependency_update_pr.py\n+++ b/bin/make_dependency_update_pr.py\n@@ -40,7 +40,7 @@\n timestamp = time.strftime(\"%Y-%m-%dT%H-%M-%S\", time.gmtime())\n branch_name = f\"update-constraints-{timestamp}\"\n \n- shell(f\"git checkout -b {branch_name} origin/master\", check=True)\n+ shell(f\"git checkout -b {branch_name} origin/main\", check=True)\n \n try:\n shell(\"bin/update_dependencies.py\", check=True)\n@@ -63,7 +63,7 @@\n \"pr\",\n \"create\",\n \"--repo=pypa/cibuildwheel\",\n- \"--base=master\",\n+ \"--base=main\",\n \"--title=Update dependencies\",\n f\"--body='{body}'\",\n ],\n", "issue": "Changing the default branch to `main`\nThis is just a heads up, I'm planning to change the default branch on this repo to `main` this week, let's say Wednesday 26th. Github have a tool to change it over, and update PRs to target the new branch, but you might have to update it on local checkouts and forks. Shouldn't be a big issue though, this is what [Github say](https://github.com/github/renaming#renaming-existing-branches) about it:\r\n\r\n> Renaming a branch will:\r\n> \r\n> - Re-target any open pull requests\r\n> - Update any draft releases based on the branch\r\n> - Move any branch protection rules that explicitly reference the old name\r\n> - Update the branch used to build GitHub Pages, if applicable\r\n> - Show a notice to repository contributors, maintainers, and admins on the repository homepage with instructions to update local copies of the repository\r\n> - Show a notice to contributors who git push to the old branch\r\n> - Redirect web requests for the old branch name to the new branch name\r\n> - Return a \"Moved Permanently\" response in API requests for the old branch name\r\n\r\n---\r\n\r\nChecklist for the switch:\r\n\r\n- [x] Use the Github tool to change it over\r\n- [x] Find/replace `master` to `main` in CI configs, docs, scripts, example code, etc\r\n- [x] Change default branch on Readthedocs\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport textwrap\nimport time\nfrom pathlib import Path\nfrom subprocess import run\n\nimport click\n\n\ndef shell(cmd, **kwargs):\n return run([cmd], shell=True, **kwargs)\n\n\ndef git_repo_has_changes():\n unstaged_changes = shell(\"git diff-index --quiet HEAD --\").returncode != 0\n staged_changes = shell(\"git diff-index --quiet --cached HEAD --\").returncode != 0\n return unstaged_changes or staged_changes\n\n\[email protected]()\ndef main():\n project_root = Path(__file__).parent / \"..\"\n os.chdir(project_root)\n\n if git_repo_has_changes():\n print(\"Your git repo has uncommitted changes. 
Commit or stash before continuing.\")\n sys.exit(1)\n\n previous_branch = shell(\n \"git rev-parse --abbrev-ref HEAD\", check=True, capture_output=True, encoding=\"utf8\"\n ).stdout.strip()\n\n shell(\"git fetch origin\", check=True)\n\n timestamp = time.strftime(\"%Y-%m-%dT%H-%M-%S\", time.gmtime())\n branch_name = f\"update-constraints-{timestamp}\"\n\n shell(f\"git checkout -b {branch_name} origin/master\", check=True)\n\n try:\n shell(\"bin/update_dependencies.py\", check=True)\n\n if not git_repo_has_changes():\n print(\"Done: no constraint updates required.\")\n return\n\n shell('git commit -a -m \"Update dependencies\"', check=True)\n body = textwrap.dedent(\n f\"\"\"\n Update the versions of our dependencies.\n\n PR generated by `{os.path.basename(__file__)}`.\n \"\"\"\n )\n run(\n [\n \"gh\",\n \"pr\",\n \"create\",\n \"--repo=pypa/cibuildwheel\",\n \"--base=master\",\n \"--title=Update dependencies\",\n f\"--body='{body}'\",\n ],\n check=True,\n )\n\n print(\"Done.\")\n finally:\n # remove any local changes\n shell(\"git checkout -- .\")\n shell(f\"git checkout {previous_branch}\", check=True)\n shell(f\"git branch -D --force {branch_name}\", check=True)\n\n\nif __name__ == \"__main__\":\n main.main(standalone_mode=True)\n", "path": "bin/make_dependency_update_pr.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport textwrap\nimport time\nfrom pathlib import Path\nfrom subprocess import run\n\nimport click\n\n\ndef shell(cmd, **kwargs):\n return run([cmd], shell=True, **kwargs)\n\n\ndef git_repo_has_changes():\n unstaged_changes = shell(\"git diff-index --quiet HEAD --\").returncode != 0\n staged_changes = shell(\"git diff-index --quiet --cached HEAD --\").returncode != 0\n return unstaged_changes or staged_changes\n\n\[email protected]()\ndef main():\n project_root = Path(__file__).parent / \"..\"\n os.chdir(project_root)\n\n if git_repo_has_changes():\n print(\"Your git repo has uncommitted changes. Commit or stash before continuing.\")\n sys.exit(1)\n\n previous_branch = shell(\n \"git rev-parse --abbrev-ref HEAD\", check=True, capture_output=True, encoding=\"utf8\"\n ).stdout.strip()\n\n shell(\"git fetch origin\", check=True)\n\n timestamp = time.strftime(\"%Y-%m-%dT%H-%M-%S\", time.gmtime())\n branch_name = f\"update-constraints-{timestamp}\"\n\n shell(f\"git checkout -b {branch_name} origin/main\", check=True)\n\n try:\n shell(\"bin/update_dependencies.py\", check=True)\n\n if not git_repo_has_changes():\n print(\"Done: no constraint updates required.\")\n return\n\n shell('git commit -a -m \"Update dependencies\"', check=True)\n body = textwrap.dedent(\n f\"\"\"\n Update the versions of our dependencies.\n\n PR generated by `{os.path.basename(__file__)}`.\n \"\"\"\n )\n run(\n [\n \"gh\",\n \"pr\",\n \"create\",\n \"--repo=pypa/cibuildwheel\",\n \"--base=main\",\n \"--title=Update dependencies\",\n f\"--body='{body}'\",\n ],\n check=True,\n )\n\n print(\"Done.\")\n finally:\n # remove any local changes\n shell(\"git checkout -- .\")\n shell(f\"git checkout {previous_branch}\", check=True)\n shell(f\"git branch -D --force {branch_name}\", check=True)\n\n\nif __name__ == \"__main__\":\n main.main(standalone_mode=True)\n", "path": "bin/make_dependency_update_pr.py"}]} | 1,227 | 201 |
gh_patches_debug_14764 | rasdani/github-patches | git_diff | kserve__kserve-882 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing requirements.txt in the Pypi source code
**What steps did you take and what happened:**
The requirements.txt file is missing in the source code on Pypi so setuptools will not work.
```
Executing setuptoolsBuildPhase
Traceback (most recent call last):
File "nix_run_setup", line 8, in <module>
exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))
File "setup.py", line 23, in <module>
with open('requirements.txt') as f:
FileNotFoundError: [Errno 2] No such file or directory: 'requirements.txt'
builder for '/nix/store/z8sh0v4cji9aq9v02865273xvmhcwzgh-python3.8-kfserving-0.3.0.1.drv' failed with exit code 1
cannot build derivation '/nix/store/75ihn4avq52qdpavs0s8c1y0nj0wjfdx-python3-3.8.2-env.drv': 1 dependencies couldn't be built
```
**What did you expect to happen:**
requirements.txt in the tar.gz archive
**Environment:**
- Istio Version:
- Knative Version:
- KFServing Version: 0.3.0.1
- Kubeflow version:
- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]
- Minikube version:
- Kubernetes version: (use `kubectl version`):
- OS (e.g. from `/etc/os-release`): NixOS 20.03 (Markhor) x86_64
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kfserving/setup.py`
Content:
```
1 # Copyright 2020 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import setuptools
16
17 TESTS_REQUIRES = [
18 'pytest',
19 'pytest-tornasync',
20 'mypy'
21 ]
22
23 with open('requirements.txt') as f:
24 REQUIRES = f.readlines()
25
26 setuptools.setup(
27 name='kfserving',
28 version='0.3.0.1',
29 author="Kubeflow Authors",
30 author_email='[email protected], [email protected]',
31 license="Apache License Version 2.0",
32 url="https://github.com/kubeflow/kfserving/python/kfserving",
33 description="KFServing Python SDK",
34 long_description="Python SDK for KFServing Server and Client.",
35 python_requires='>=3.6',
36 packages=[
37 'kfserving',
38 'kfserving.api',
39 'kfserving.constants',
40 'kfserving.models',
41 'kfserving.handlers',
42 'kfserving.utils',
43 ],
44 package_data={},
45 include_package_data=False,
46 zip_safe=False,
47 classifiers=[
48 'Intended Audience :: Developers',
49 'Intended Audience :: Education',
50 'Intended Audience :: Science/Research',
51 'Programming Language :: Python :: 3',
52 'Programming Language :: Python :: 3.6',
53 'Programming Language :: Python :: 3.7',
54 "License :: OSI Approved :: Apache Software License",
55 "Operating System :: OS Independent",
56 'Topic :: Scientific/Engineering',
57 'Topic :: Scientific/Engineering :: Artificial Intelligence',
58 'Topic :: Software Development',
59 'Topic :: Software Development :: Libraries',
60 'Topic :: Software Development :: Libraries :: Python Modules',
61 ],
62 install_requires=REQUIRES,
63 tests_require=TESTS_REQUIRES,
64 extras_require={'test': TESTS_REQUIRES}
65 )
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/kfserving/setup.py b/python/kfserving/setup.py
--- a/python/kfserving/setup.py
+++ b/python/kfserving/setup.py
@@ -25,7 +25,7 @@
setuptools.setup(
name='kfserving',
- version='0.3.0.1',
+ version='0.3.0.2',
author="Kubeflow Authors",
author_email='[email protected], [email protected]',
license="Apache License Version 2.0",
@@ -41,8 +41,8 @@
'kfserving.handlers',
'kfserving.utils',
],
- package_data={},
- include_package_data=False,
+ package_data={'': ['requirements.txt']},
+ include_package_data=True,
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
| {"golden_diff": "diff --git a/python/kfserving/setup.py b/python/kfserving/setup.py\n--- a/python/kfserving/setup.py\n+++ b/python/kfserving/setup.py\n@@ -25,7 +25,7 @@\n \n setuptools.setup(\n name='kfserving',\n- version='0.3.0.1',\n+ version='0.3.0.2',\n author=\"Kubeflow Authors\",\n author_email='[email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n@@ -41,8 +41,8 @@\n 'kfserving.handlers',\n 'kfserving.utils',\n ],\n- package_data={},\n- include_package_data=False,\n+ package_data={'': ['requirements.txt']},\n+ include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n", "issue": "Missing requirements.txt in the Pypi source code\n**What steps did you take and what happened:**\r\nThe requirements.txt file is missing in the source code on Pypi so setuptools will not work.\r\n\r\n```\r\nExecuting setuptoolsBuildPhase\r\nTraceback (most recent call last):\r\n File \"nix_run_setup\", line 8, in <module>\r\n exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\\\r\\\\n', '\\\\n'), __file__, 'exec'))\r\n File \"setup.py\", line 23, in <module>\r\n with open('requirements.txt') as f:\r\nFileNotFoundError: [Errno 2] No such file or directory: 'requirements.txt'\r\nbuilder for '/nix/store/z8sh0v4cji9aq9v02865273xvmhcwzgh-python3.8-kfserving-0.3.0.1.drv' failed with exit code 1\r\ncannot build derivation '/nix/store/75ihn4avq52qdpavs0s8c1y0nj0wjfdx-python3-3.8.2-env.drv': 1 dependencies couldn't be built\r\n```\r\n\r\n**What did you expect to happen:**\r\nrequirements.txt in the tar.gz archive\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KFServing Version: 0.3.0.1\r\n- Kubeflow version:\r\n- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- Minikube version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. 
from `/etc/os-release`): NixOS 20.03 (Markhor) x86_64\r\n\n", "before_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nsetuptools.setup(\n name='kfserving',\n version='0.3.0.1',\n author=\"Kubeflow Authors\",\n author_email='[email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kubeflow/kfserving/python/kfserving\",\n description=\"KFServing Python SDK\",\n long_description=\"Python SDK for KFServing Server and Client.\",\n python_requires='>=3.6',\n packages=[\n 'kfserving',\n 'kfserving.api',\n 'kfserving.constants',\n 'kfserving.models',\n 'kfserving.handlers',\n 'kfserving.utils',\n ],\n package_data={},\n include_package_data=False,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kfserving/setup.py"}], "after_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nsetuptools.setup(\n name='kfserving',\n version='0.3.0.2',\n author=\"Kubeflow Authors\",\n author_email='[email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kubeflow/kfserving/python/kfserving\",\n description=\"KFServing Python SDK\",\n long_description=\"Python SDK for KFServing Server and Client.\",\n python_requires='>=3.6',\n packages=[\n 'kfserving',\n 'kfserving.api',\n 'kfserving.constants',\n 'kfserving.models',\n 'kfserving.handlers',\n 'kfserving.utils',\n ],\n package_data={'': ['requirements.txt']},\n 
include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kfserving/setup.py"}]} | 1,288 | 197 |
gh_patches_debug_11393 | rasdani/github-patches | git_diff | ManimCommunity__manim-189 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
With no scene specified on CLI, manim gives false choice.
I was running the command:
`manim -pl --media_dir ~/renderings ~/dev/manim/example_scenes/basic.py `
and was greeted by this:
```
[19:29:42] INFO Read configuration files: config.py:416
['/home/cobordism/.local/lib/python3.8/site-packages/manim/default.cfg']
1: GraphScene
2: LinearTransformationScene
3: MovingCameraScene
4: OpeningManimExample
5: ReconfigurableScene
6: SampleSpaceScene
7: SpecialThreeDScene
8: SquareToCircle
9: ThreeDScene
10: UpdatersExample
11: VectorScene
12: WarpSquare
13: WriteStuff
14: ZoomedScene
Choose number corresponding to desired scene/arguments.
(Use comma separated list for multiple entries)
Choice(s):
```
Clearly something is wonky here The choices should have been only the scenes from basic.py.
i.e. OpeningManimExample, SquareToCircle, WarpSquare, WriteStuff, UpdatersExample.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/__main__.py`
Content:
```
1 import inspect
2 import os
3 import platform
4 import subprocess as sp
5 import sys
6 import re
7 import traceback
8 import importlib.util
9
10 from .config import file_writer_config
11 from .scene.scene import Scene
12 from .utils.sounds import play_error_sound
13 from .utils.sounds import play_finish_sound
14 from . import constants
15 from .logger import logger
16
17
18 def open_file_if_needed(file_writer):
19 if file_writer_config["quiet"]:
20 curr_stdout = sys.stdout
21 sys.stdout = open(os.devnull, "w")
22
23 open_file = any([
24 file_writer_config["preview"],
25 file_writer_config["show_file_in_finder"]
26 ])
27 if open_file:
28 current_os = platform.system()
29 file_paths = []
30
31 if file_writer_config["save_last_frame"]:
32 file_paths.append(file_writer.get_image_file_path())
33 if file_writer_config["write_to_movie"]:
34 file_paths.append(file_writer.get_movie_file_path())
35
36 for file_path in file_paths:
37 if current_os == "Windows":
38 os.startfile(file_path)
39 else:
40 commands = []
41 if current_os == "Linux":
42 commands.append("xdg-open")
43 elif current_os.startswith("CYGWIN"):
44 commands.append("cygstart")
45 else: # Assume macOS
46 commands.append("open")
47
48 if file_writer_config["show_file_in_finder"]:
49 commands.append("-R")
50
51 commands.append(file_path)
52
53 # commands.append("-g")
54 FNULL = open(os.devnull, 'w')
55 sp.call(commands, stdout=FNULL, stderr=sp.STDOUT)
56 FNULL.close()
57
58 if file_writer_config["quiet"]:
59 sys.stdout.close()
60 sys.stdout = curr_stdout
61
62
63 def is_child_scene(obj, module):
64 return (inspect.isclass(obj)
65 and issubclass(obj, Scene)
66 and obj != Scene
67 and obj.__module__.startswith(module.__name__))
68
69
70 def prompt_user_for_choice(scene_classes):
71 num_to_class = {}
72 for count, scene_class in enumerate(scene_classes):
73 count += 1 # start with 1 instead of 0
74 name = scene_class.__name__
75 print("%d: %s" % (count, name))
76 num_to_class[count] = scene_class
77 try:
78 user_input = input(constants.CHOOSE_NUMBER_MESSAGE)
79 return [num_to_class[int(num_str)]
80 for num_str in re.split(r"\s*,\s*", user_input.strip())]
81 except KeyError:
82 logger.error(constants.INVALID_NUMBER_MESSAGE)
83 sys.exit(2)
84 except EOFError:
85 sys.exit(1)
86
87
88 def get_scenes_to_render(scene_classes):
89 if not scene_classes:
90 logger.error(constants.NO_SCENE_MESSAGE)
91 return []
92 if file_writer_config["write_all"]:
93 return scene_classes
94 result = []
95 for scene_name in file_writer_config["scene_names"]:
96 found = False
97 for scene_class in scene_classes:
98 if scene_class.__name__ == scene_name:
99 result.append(scene_class)
100 found = True
101 break
102 if not found and (scene_name != ""):
103 logger.error(
104 constants.SCENE_NOT_FOUND_MESSAGE.format(
105 scene_name
106 )
107 )
108 if result:
109 return result
110 return [scene_classes[0]] if len(scene_classes) == 1 else prompt_user_for_choice(scene_classes)
111
112
113 def get_scene_classes_from_module(module):
114 if hasattr(module, "SCENES_IN_ORDER"):
115 return module.SCENES_IN_ORDER
116 else:
117 return [
118 member[1]
119 for member in inspect.getmembers(
120 module,
121 lambda x: is_child_scene(x, module)
122 )
123 ]
124
125
126 def get_module(file_name):
127 if file_name == "-":
128 module = types.ModuleType("input_scenes")
129 code = sys.stdin.read()
130 try:
131 exec(code, module.__dict__)
132 return module
133 except Exception as e:
134 logger.error(f"Failed to render scene: {str(e)}")
135 sys.exit(2)
136 else:
137 if os.path.exists(file_name):
138 module_name = re.sub(r"\..+$", "", file_name.replace(os.sep, "."))
139 spec = importlib.util.spec_from_file_location(module_name, file_name)
140 module = importlib.util.module_from_spec(spec)
141 spec.loader.exec_module(module)
142 return module
143 else:
144 raise FileNotFoundError(f'{file_name} not found')
145
146
147 def main():
148 module = get_module(file_writer_config["input_file"])
149 all_scene_classes = get_scene_classes_from_module(module)
150 scene_classes_to_render = get_scenes_to_render(all_scene_classes)
151 sound_on = file_writer_config["sound"]
152 for SceneClass in scene_classes_to_render:
153 try:
154 # By invoking, this renders the full scene
155 scene = SceneClass()
156 open_file_if_needed(scene.file_writer)
157 if sound_on:
158 play_finish_sound()
159 except Exception:
160 print("\n\n")
161 traceback.print_exc()
162 print("\n\n")
163 if sound_on:
164 play_error_sound()
165
166
167 if __name__ == "__main__":
168 main()
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/__main__.py b/manim/__main__.py
--- a/manim/__main__.py
+++ b/manim/__main__.py
@@ -135,7 +135,9 @@
sys.exit(2)
else:
if os.path.exists(file_name):
- module_name = re.sub(r"\..+$", "", file_name.replace(os.sep, "."))
+ if file_name[-3:] != ".py":
+ raise Exception(f"{file_name} is not a valid Manim python script.")
+ module_name = file_name[:-3].replace(os.sep, '.').split('.')[-1]
spec = importlib.util.spec_from_file_location(module_name, file_name)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
| {"golden_diff": "diff --git a/manim/__main__.py b/manim/__main__.py\n--- a/manim/__main__.py\n+++ b/manim/__main__.py\n@@ -135,7 +135,9 @@\n sys.exit(2)\n else:\n if os.path.exists(file_name):\n- module_name = re.sub(r\"\\..+$\", \"\", file_name.replace(os.sep, \".\"))\n+ if file_name[-3:] != \".py\":\n+ raise Exception(f\"{file_name} is not a valid Manim python script.\")\n+ module_name = file_name[:-3].replace(os.sep, '.').split('.')[-1]\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n", "issue": "With no scene specified on CLI, manim gives false choice.\nI was running the command:\r\n`manim -pl --media_dir ~/renderings ~/dev/manim/example_scenes/basic.py ` \r\nand was greeted by this:\r\n```\r\n\r\n[19:29:42] INFO Read configuration files: config.py:416\r\n ['/home/cobordism/.local/lib/python3.8/site-packages/manim/default.cfg'] \r\n1: GraphScene\r\n2: LinearTransformationScene\r\n3: MovingCameraScene\r\n4: OpeningManimExample\r\n5: ReconfigurableScene\r\n6: SampleSpaceScene\r\n7: SpecialThreeDScene\r\n8: SquareToCircle\r\n9: ThreeDScene\r\n10: UpdatersExample\r\n11: VectorScene\r\n12: WarpSquare\r\n13: WriteStuff\r\n14: ZoomedScene\r\n\r\nChoose number corresponding to desired scene/arguments.\r\n(Use comma separated list for multiple entries)\r\nChoice(s): \r\n\r\n```\r\n\r\nClearly something is wonky here The choices should have been only the scenes from basic.py.\r\ni.e. OpeningManimExample, SquareToCircle, WarpSquare, WriteStuff, UpdatersExample.\r\n\r\n\n", "before_files": [{"content": "import inspect\nimport os\nimport platform\nimport subprocess as sp\nimport sys\nimport re\nimport traceback\nimport importlib.util\n\nfrom .config import file_writer_config\nfrom .scene.scene import Scene\nfrom .utils.sounds import play_error_sound\nfrom .utils.sounds import play_finish_sound\nfrom . 
import constants\nfrom .logger import logger\n\n\ndef open_file_if_needed(file_writer):\n if file_writer_config[\"quiet\"]:\n curr_stdout = sys.stdout\n sys.stdout = open(os.devnull, \"w\")\n\n open_file = any([\n file_writer_config[\"preview\"],\n file_writer_config[\"show_file_in_finder\"]\n ])\n if open_file:\n current_os = platform.system()\n file_paths = []\n\n if file_writer_config[\"save_last_frame\"]:\n file_paths.append(file_writer.get_image_file_path())\n if file_writer_config[\"write_to_movie\"]:\n file_paths.append(file_writer.get_movie_file_path())\n\n for file_path in file_paths:\n if current_os == \"Windows\":\n os.startfile(file_path)\n else:\n commands = []\n if current_os == \"Linux\":\n commands.append(\"xdg-open\")\n elif current_os.startswith(\"CYGWIN\"):\n commands.append(\"cygstart\")\n else: # Assume macOS\n commands.append(\"open\")\n\n if file_writer_config[\"show_file_in_finder\"]:\n commands.append(\"-R\")\n\n commands.append(file_path)\n\n # commands.append(\"-g\")\n FNULL = open(os.devnull, 'w')\n sp.call(commands, stdout=FNULL, stderr=sp.STDOUT)\n FNULL.close()\n\n if file_writer_config[\"quiet\"]:\n sys.stdout.close()\n sys.stdout = curr_stdout\n\n\ndef is_child_scene(obj, module):\n return (inspect.isclass(obj)\n and issubclass(obj, Scene)\n and obj != Scene\n and obj.__module__.startswith(module.__name__))\n\n\ndef prompt_user_for_choice(scene_classes):\n num_to_class = {}\n for count, scene_class in enumerate(scene_classes):\n count += 1 # start with 1 instead of 0\n name = scene_class.__name__\n print(\"%d: %s\" % (count, name))\n num_to_class[count] = scene_class\n try:\n user_input = input(constants.CHOOSE_NUMBER_MESSAGE)\n return [num_to_class[int(num_str)]\n for num_str in re.split(r\"\\s*,\\s*\", user_input.strip())]\n except KeyError:\n logger.error(constants.INVALID_NUMBER_MESSAGE)\n sys.exit(2)\n except EOFError:\n sys.exit(1)\n\n\ndef get_scenes_to_render(scene_classes):\n if not scene_classes:\n logger.error(constants.NO_SCENE_MESSAGE)\n return []\n if file_writer_config[\"write_all\"]:\n return scene_classes\n result = []\n for scene_name in file_writer_config[\"scene_names\"]:\n found = False\n for scene_class in scene_classes:\n if scene_class.__name__ == scene_name:\n result.append(scene_class)\n found = True\n break\n if not found and (scene_name != \"\"):\n logger.error(\n constants.SCENE_NOT_FOUND_MESSAGE.format(\n scene_name\n )\n )\n if result:\n return result\n return [scene_classes[0]] if len(scene_classes) == 1 else prompt_user_for_choice(scene_classes)\n\n\ndef get_scene_classes_from_module(module):\n if hasattr(module, \"SCENES_IN_ORDER\"):\n return module.SCENES_IN_ORDER\n else:\n return [\n member[1]\n for member in inspect.getmembers(\n module,\n lambda x: is_child_scene(x, module)\n )\n ]\n\n\ndef get_module(file_name):\n if file_name == \"-\":\n module = types.ModuleType(\"input_scenes\")\n code = sys.stdin.read()\n try:\n exec(code, module.__dict__)\n return module\n except Exception as e:\n logger.error(f\"Failed to render scene: {str(e)}\")\n sys.exit(2)\n else:\n if os.path.exists(file_name):\n module_name = re.sub(r\"\\..+$\", \"\", file_name.replace(os.sep, \".\"))\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n else:\n raise FileNotFoundError(f'{file_name} not found')\n\n\ndef main():\n module = get_module(file_writer_config[\"input_file\"])\n all_scene_classes = 
get_scene_classes_from_module(module)\n scene_classes_to_render = get_scenes_to_render(all_scene_classes)\n sound_on = file_writer_config[\"sound\"]\n for SceneClass in scene_classes_to_render:\n try:\n # By invoking, this renders the full scene\n scene = SceneClass()\n open_file_if_needed(scene.file_writer)\n if sound_on:\n play_finish_sound()\n except Exception:\n print(\"\\n\\n\")\n traceback.print_exc()\n print(\"\\n\\n\")\n if sound_on:\n play_error_sound()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "manim/__main__.py"}], "after_files": [{"content": "import inspect\nimport os\nimport platform\nimport subprocess as sp\nimport sys\nimport re\nimport traceback\nimport importlib.util\n\nfrom .config import file_writer_config\nfrom .scene.scene import Scene\nfrom .utils.sounds import play_error_sound\nfrom .utils.sounds import play_finish_sound\nfrom . import constants\nfrom .logger import logger\n\n\ndef open_file_if_needed(file_writer):\n if file_writer_config[\"quiet\"]:\n curr_stdout = sys.stdout\n sys.stdout = open(os.devnull, \"w\")\n\n open_file = any([\n file_writer_config[\"preview\"],\n file_writer_config[\"show_file_in_finder\"]\n ])\n if open_file:\n current_os = platform.system()\n file_paths = []\n\n if file_writer_config[\"save_last_frame\"]:\n file_paths.append(file_writer.get_image_file_path())\n if file_writer_config[\"write_to_movie\"]:\n file_paths.append(file_writer.get_movie_file_path())\n\n for file_path in file_paths:\n if current_os == \"Windows\":\n os.startfile(file_path)\n else:\n commands = []\n if current_os == \"Linux\":\n commands.append(\"xdg-open\")\n elif current_os.startswith(\"CYGWIN\"):\n commands.append(\"cygstart\")\n else: # Assume macOS\n commands.append(\"open\")\n\n if file_writer_config[\"show_file_in_finder\"]:\n commands.append(\"-R\")\n\n commands.append(file_path)\n\n # commands.append(\"-g\")\n FNULL = open(os.devnull, 'w')\n sp.call(commands, stdout=FNULL, stderr=sp.STDOUT)\n FNULL.close()\n\n if file_writer_config[\"quiet\"]:\n sys.stdout.close()\n sys.stdout = curr_stdout\n\n\ndef is_child_scene(obj, module):\n return (inspect.isclass(obj)\n and issubclass(obj, Scene)\n and obj != Scene\n and obj.__module__.startswith(module.__name__))\n\n\ndef prompt_user_for_choice(scene_classes):\n num_to_class = {}\n for count, scene_class in enumerate(scene_classes):\n count += 1 # start with 1 instead of 0\n name = scene_class.__name__\n print(\"%d: %s\" % (count, name))\n num_to_class[count] = scene_class\n try:\n user_input = input(constants.CHOOSE_NUMBER_MESSAGE)\n return [num_to_class[int(num_str)]\n for num_str in re.split(r\"\\s*,\\s*\", user_input.strip())]\n except KeyError:\n logger.error(constants.INVALID_NUMBER_MESSAGE)\n sys.exit(2)\n except EOFError:\n sys.exit(1)\n\n\ndef get_scenes_to_render(scene_classes):\n if not scene_classes:\n logger.error(constants.NO_SCENE_MESSAGE)\n return []\n if file_writer_config[\"write_all\"]:\n return scene_classes\n result = []\n for scene_name in file_writer_config[\"scene_names\"]:\n found = False\n for scene_class in scene_classes:\n if scene_class.__name__ == scene_name:\n result.append(scene_class)\n found = True\n break\n if not found and (scene_name != \"\"):\n logger.error(\n constants.SCENE_NOT_FOUND_MESSAGE.format(\n scene_name\n )\n )\n if result:\n return result\n return [scene_classes[0]] if len(scene_classes) == 1 else prompt_user_for_choice(scene_classes)\n\n\ndef get_scene_classes_from_module(module):\n if hasattr(module, \"SCENES_IN_ORDER\"):\n return 
module.SCENES_IN_ORDER\n else:\n return [\n member[1]\n for member in inspect.getmembers(\n module,\n lambda x: is_child_scene(x, module)\n )\n ]\n\n\ndef get_module(file_name):\n if file_name == \"-\":\n module = types.ModuleType(\"input_scenes\")\n code = sys.stdin.read()\n try:\n exec(code, module.__dict__)\n return module\n except Exception as e:\n logger.error(f\"Failed to render scene: {str(e)}\")\n sys.exit(2)\n else:\n if os.path.exists(file_name):\n if file_name[-3:] != \".py\":\n raise Exception(f\"{file_name} is not a valid Manim python script.\")\n module_name = file_name[:-3].replace(os.sep, '.').split('.')[-1]\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n else:\n raise FileNotFoundError(f'{file_name} not found')\n\n\ndef main():\n module = get_module(file_writer_config[\"input_file\"])\n all_scene_classes = get_scene_classes_from_module(module)\n scene_classes_to_render = get_scenes_to_render(all_scene_classes)\n sound_on = file_writer_config[\"sound\"]\n for SceneClass in scene_classes_to_render:\n try:\n # By invoking, this renders the full scene\n scene = SceneClass()\n open_file_if_needed(scene.file_writer)\n if sound_on:\n play_finish_sound()\n except Exception:\n print(\"\\n\\n\")\n traceback.print_exc()\n print(\"\\n\\n\")\n if sound_on:\n play_error_sound()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "manim/__main__.py"}]} | 1,998 | 173 |