problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64 556-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_4748 | rasdani/github-patches | git_diff | kubeflow__pipelines-268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Text/link displayed after calling run_pipeline should be "Run link" not "Job link"
Here: https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/_client.py#L153
</issue>
<code>
[start of sdk/python/kfp/_client.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 import six
17 import time
18 import logging
19 import json
20 import os
21 import tarfile
22 import yaml
23 from datetime import datetime
24
25
26 class Client(object):
27 """ API Client for KubeFlow Pipeline.
28 """
29
30 def __init__(self, host='ml-pipeline.kubeflow.svc.cluster.local:8888'):
31 """Create a new instance of kfp client.
32
33 Args:
34 host: the API host. If running inside the cluster as a Pod, default value should work.
35 """
36
37 try:
38 import kfp_experiment
39 except ImportError:
40 raise Exception('This module requires installation of kfp_experiment')
41
42 try:
43 import kfp_run
44 except ImportError:
45 raise Exception('This module requires installation of kfp_run')
46
47 config = kfp_run.configuration.Configuration()
48 config.host = host
49 api_client = kfp_run.api_client.ApiClient(config)
50 self._run_api = kfp_run.api.run_service_api.RunServiceApi(api_client)
51
52 config = kfp_experiment.configuration.Configuration()
53 config.host = host
54 api_client = kfp_experiment.api_client.ApiClient(config)
55 self._experiment_api = \
56 kfp_experiment.api.experiment_service_api.ExperimentServiceApi(api_client)
57
58 def _is_ipython(self):
59 """Returns whether we are running in notebook."""
60 try:
61 import IPython
62 except ImportError:
63 return False
64
65 return True
66
67 def create_experiment(self, name):
68 """Create a new experiment.
69 Args:
70 name: the name of the experiment.
71 Returns:
72 An Experiment object. Most important field is id.
73 """
74 import kfp_experiment
75
76 exp = kfp_experiment.models.ApiExperiment(name=name)
77 response = self._experiment_api.create_experiment(body=exp)
78
79 if self._is_ipython():
80 import IPython
81 html = \
82 ('Experiment link <a href="/pipeline/#/experiments/details/%s" target="_blank" >here</a>'
83 % response.id)
84 IPython.display.display(IPython.display.HTML(html))
85 return response
86
87 def list_experiments(self, page_token='', page_size=10, sort_by=''):
88 """List experiments.
89 Args:
90 page_token: token for starting of the page.
91 page_size: size of the page.
92 sort_by: can be '[field_name]', '[field_name] des'. For example, 'name des'.
93 Returns:
94 A response object including a list of experiments and next page token.
95 """
96 response = self._experiment_api.list_experiment(
97 page_token=page_token, page_size=page_size, sort_by=sort_by)
98 return response
99
100 def get_experiment(self, experiment_id):
101 """Get details of an experiment
102 Args:
103 id of the experiment.
104 Returns:
105 A response object including details of a experiment.
106 Throws:
107 Exception if experiment is not found.
108 """
109 return self._experiment_api.get_experiment(id=experiment_id)
110
111 def _extract_pipeline_yaml(self, tar_file):
112 with tarfile.open(tar_file, "r:gz") as tar:
113 all_yaml_files = [m for m in tar if m.isfile() and
114 (os.path.splitext(m.name)[-1] == '.yaml' or os.path.splitext(m.name)[-1] == '.yml')]
115 if len(all_yaml_files) == 0:
116 raise ValueError('Invalid package. Missing pipeline yaml file in the package.')
117
118 if len(all_yaml_files) > 1:
119 raise ValueError('Invalid package. Multiple yaml files in the package.')
120
121 with tar.extractfile(all_yaml_files[0]) as f:
122 return yaml.load(f)
123
124 def run_pipeline(self, experiment_id, job_name, pipeline_package_path, params={}):
125 """Run a specified pipeline.
126
127 Args:
128 experiment_id: The string id of an experiment.
129 job_name: name of the job.
130 pipeline_package_path: local path of the pipeline package(tar.gz file).
131 params: a dictionary with key (string) as param name and value (string) as as param value.
132
133 Returns:
134 A run object. Most important field is id.
135 """
136 import kfp_run
137
138 pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)
139 pipeline_json_string = json.dumps(pipeline_obj)
140 api_params = [kfp_run.ApiParameter(name=k, value=str(v)) for k,v in six.iteritems(params)]
141 key = kfp_run.models.ApiResourceKey(id=experiment_id,
142 type=kfp_run.models.ApiResourceType.EXPERIMENT)
143 reference = kfp_run.models.ApiResourceReference(key, kfp_run.models.ApiRelationship.OWNER)
144 spec = kfp_run.models.ApiPipelineSpec(
145 workflow_manifest=pipeline_json_string, parameters=api_params)
146 run_body = kfp_run.models.ApiRun(
147 pipeline_spec=spec, resource_references=[reference], name=job_name)
148
149 response = self._run_api.create_run(body=run_body)
150
151 if self._is_ipython():
152 import IPython
153 html = ('Job link <a href="/pipeline/#/runs/details/%s" target="_blank" >here</a>'
154 % response.run.id)
155 IPython.display.display(IPython.display.HTML(html))
156 return response.run
157
158 def list_runs(self, page_token='', page_size=10, sort_by=''):
159 """List runs.
160 Args:
161 page_token: token for starting of the page.
162 page_size: size of the page.
163 sort_by: one of 'field_name', 'field_name des'. For example, 'name des'.
164 Returns:
165 A response object including a list of experiments and next page token.
166 """
167 response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by)
168 return response
169
170 def get_run(self, run_id):
171 """Get run details.
172 Args:
173 id of the run.
174 Returns:
175 A response object including details of a run.
176 Throws:
177 Exception if run is not found.
178 """
179 return self._run_api.get_run(run_id=run_id)
180
181 def wait_for_run_completion(self, run_id, timeout):
182 """Wait for a run to complete.
183 Args:
184 run_id: run id, returned from run_pipeline.
185 timeout: timeout in seconds.
186 Returns:
187 A run detail object: Most important fields are run and pipeline_runtime
188 """
189 status = 'Running:'
190 start_time = datetime.now()
191 while status is None or status.lower() not in ['succeeded', 'failed', 'skipped', 'error']:
192 get_run_response = self._run_api.get_run(run_id=run_id)
193 status = get_run_response.run.status
194 elapsed_time = (datetime.now() - start_time).seconds
195 logging.info('Waiting for the job to complete...')
196 if elapsed_time > timeout:
197 raise TimeoutError('Run timeout')
198 time.sleep(5)
199 return get_run_response
200
201 def _get_workflow_json(self, run_id):
202 """Get the workflow json.
203 Args:
204 run_id: run id, returned from run_pipeline.
205 Returns:
206 workflow: json workflow
207 """
208 get_run_response = self._run_api.get_run(run_id=run_id)
209 workflow = get_run_response.pipeline_runtime.workflow_manifest
210 workflow_json = json.loads(workflow)
211 return workflow_json
212
[end of sdk/python/kfp/_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/kfp/_client.py b/sdk/python/kfp/_client.py
--- a/sdk/python/kfp/_client.py
+++ b/sdk/python/kfp/_client.py
@@ -150,7 +150,7 @@
if self._is_ipython():
import IPython
- html = ('Job link <a href="/pipeline/#/runs/details/%s" target="_blank" >here</a>'
+ html = ('Run link <a href="/pipeline/#/runs/details/%s" target="_blank" >here</a>'
% response.run.id)
IPython.display.display(IPython.display.HTML(html))
return response.run
| {"golden_diff": "diff --git a/sdk/python/kfp/_client.py b/sdk/python/kfp/_client.py\n--- a/sdk/python/kfp/_client.py\n+++ b/sdk/python/kfp/_client.py\n@@ -150,7 +150,7 @@\n \n if self._is_ipython():\n import IPython\n- html = ('Job link <a href=\"/pipeline/#/runs/details/%s\" target=\"_blank\" >here</a>'\n+ html = ('Run link <a href=\"/pipeline/#/runs/details/%s\" target=\"_blank\" >here</a>'\n % response.run.id)\n IPython.display.display(IPython.display.HTML(html))\n return response.run\n", "issue": "Text/link displayed after calling run_pipeline should be \"Run link\" not \"Job link\" \nHere: https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/_client.py#L153\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport six\nimport time\nimport logging\nimport json\nimport os\nimport tarfile\nimport yaml\nfrom datetime import datetime\n\n\nclass Client(object):\n \"\"\" API Client for KubeFlow Pipeline.\n \"\"\"\n\n def __init__(self, host='ml-pipeline.kubeflow.svc.cluster.local:8888'):\n \"\"\"Create a new instance of kfp client.\n\n Args:\n host: the API host. If running inside the cluster as a Pod, default value should work.\n \"\"\"\n\n try:\n import kfp_experiment\n except ImportError:\n raise Exception('This module requires installation of kfp_experiment')\n\n try:\n import kfp_run\n except ImportError:\n raise Exception('This module requires installation of kfp_run')\n\n config = kfp_run.configuration.Configuration()\n config.host = host\n api_client = kfp_run.api_client.ApiClient(config)\n self._run_api = kfp_run.api.run_service_api.RunServiceApi(api_client)\n\n config = kfp_experiment.configuration.Configuration()\n config.host = host\n api_client = kfp_experiment.api_client.ApiClient(config)\n self._experiment_api = \\\n kfp_experiment.api.experiment_service_api.ExperimentServiceApi(api_client)\n\n def _is_ipython(self):\n \"\"\"Returns whether we are running in notebook.\"\"\"\n try:\n import IPython\n except ImportError:\n return False\n\n return True\n\n def create_experiment(self, name):\n \"\"\"Create a new experiment.\n Args:\n name: the name of the experiment.\n Returns:\n An Experiment object. Most important field is id.\n \"\"\"\n import kfp_experiment\n\n exp = kfp_experiment.models.ApiExperiment(name=name)\n response = self._experiment_api.create_experiment(body=exp)\n \n if self._is_ipython():\n import IPython\n html = \\\n ('Experiment link <a href=\"/pipeline/#/experiments/details/%s\" target=\"_blank\" >here</a>'\n % response.id)\n IPython.display.display(IPython.display.HTML(html))\n return response\n\n def list_experiments(self, page_token='', page_size=10, sort_by=''):\n \"\"\"List experiments.\n Args:\n page_token: token for starting of the page.\n page_size: size of the page.\n sort_by: can be '[field_name]', '[field_name] des'. 
For example, 'name des'.\n Returns:\n A response object including a list of experiments and next page token.\n \"\"\"\n response = self._experiment_api.list_experiment(\n page_token=page_token, page_size=page_size, sort_by=sort_by)\n return response\n\n def get_experiment(self, experiment_id):\n \"\"\"Get details of an experiment\n Args:\n id of the experiment.\n Returns:\n A response object including details of a experiment.\n Throws:\n Exception if experiment is not found. \n \"\"\"\n return self._experiment_api.get_experiment(id=experiment_id)\n\n def _extract_pipeline_yaml(self, tar_file):\n with tarfile.open(tar_file, \"r:gz\") as tar:\n all_yaml_files = [m for m in tar if m.isfile() and \n (os.path.splitext(m.name)[-1] == '.yaml' or os.path.splitext(m.name)[-1] == '.yml')]\n if len(all_yaml_files) == 0:\n raise ValueError('Invalid package. Missing pipeline yaml file in the package.')\n \n if len(all_yaml_files) > 1:\n raise ValueError('Invalid package. Multiple yaml files in the package.')\n \n with tar.extractfile(all_yaml_files[0]) as f:\n return yaml.load(f)\n\n def run_pipeline(self, experiment_id, job_name, pipeline_package_path, params={}):\n \"\"\"Run a specified pipeline.\n\n Args:\n experiment_id: The string id of an experiment.\n job_name: name of the job.\n pipeline_package_path: local path of the pipeline package(tar.gz file).\n params: a dictionary with key (string) as param name and value (string) as as param value.\n\n Returns:\n A run object. Most important field is id.\n \"\"\"\n import kfp_run\n\n pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)\n pipeline_json_string = json.dumps(pipeline_obj)\n api_params = [kfp_run.ApiParameter(name=k, value=str(v)) for k,v in six.iteritems(params)]\n key = kfp_run.models.ApiResourceKey(id=experiment_id,\n type=kfp_run.models.ApiResourceType.EXPERIMENT)\n reference = kfp_run.models.ApiResourceReference(key, kfp_run.models.ApiRelationship.OWNER)\n spec = kfp_run.models.ApiPipelineSpec(\n workflow_manifest=pipeline_json_string, parameters=api_params)\n run_body = kfp_run.models.ApiRun(\n pipeline_spec=spec, resource_references=[reference], name=job_name)\n\n response = self._run_api.create_run(body=run_body)\n \n if self._is_ipython():\n import IPython\n html = ('Job link <a href=\"/pipeline/#/runs/details/%s\" target=\"_blank\" >here</a>'\n % response.run.id)\n IPython.display.display(IPython.display.HTML(html))\n return response.run\n\n def list_runs(self, page_token='', page_size=10, sort_by=''):\n \"\"\"List runs.\n Args:\n page_token: token for starting of the page.\n page_size: size of the page.\n sort_by: one of 'field_name', 'field_name des'. 
For example, 'name des'.\n Returns:\n A response object including a list of experiments and next page token.\n \"\"\"\n response = self._run_api.list_runs(page_token=page_token, page_size=page_size, sort_by=sort_by)\n return response\n\n def get_run(self, run_id):\n \"\"\"Get run details.\n Args:\n id of the run.\n Returns:\n A response object including details of a run.\n Throws:\n Exception if run is not found.\n \"\"\"\n return self._run_api.get_run(run_id=run_id)\n\n def wait_for_run_completion(self, run_id, timeout):\n \"\"\"Wait for a run to complete.\n Args:\n run_id: run id, returned from run_pipeline.\n timeout: timeout in seconds.\n Returns:\n A run detail object: Most important fields are run and pipeline_runtime\n \"\"\"\n status = 'Running:'\n start_time = datetime.now()\n while status is None or status.lower() not in ['succeeded', 'failed', 'skipped', 'error']:\n get_run_response = self._run_api.get_run(run_id=run_id)\n status = get_run_response.run.status\n elapsed_time = (datetime.now() - start_time).seconds\n logging.info('Waiting for the job to complete...')\n if elapsed_time > timeout:\n raise TimeoutError('Run timeout')\n time.sleep(5)\n return get_run_response\n\n def _get_workflow_json(self, run_id):\n \"\"\"Get the workflow json.\n Args:\n run_id: run id, returned from run_pipeline.\n Returns:\n workflow: json workflow\n \"\"\"\n get_run_response = self._run_api.get_run(run_id=run_id)\n workflow = get_run_response.pipeline_runtime.workflow_manifest\n workflow_json = json.loads(workflow)\n return workflow_json\n", "path": "sdk/python/kfp/_client.py"}]} | 2,820 | 146 |
gh_patches_debug_9087 | rasdani/github-patches | git_diff | wagtail__wagtail-2585 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RoutablePage throws a TypeError if index route is not set
RoutablePage should not raise TypeError if index route is not set.
[route method](https://github.com/torchbox/wagtail/blob/master/wagtail/contrib/wagtailroutablepage/models.py#L97) properly calls parent class method if route is not found, but `serve` method has different signature, so it fails.
There is also a discussion of similar issue on Google groups:
https://groups.google.com/forum/#!msg/wagtail/Q9FymA-MOxM/1UkQ0hRGAAAJ
</issue>
<code>
[start of wagtail/contrib/wagtailroutablepage/models.py]
1 from __future__ import absolute_import, unicode_literals
2
3 from django.conf.urls import url
4 from django.core.urlresolvers import RegexURLResolver
5 from django.http import Http404
6
7 from wagtail.wagtailcore.models import Page
8 from wagtail.wagtailcore.url_routing import RouteResult
9
10 _creation_counter = 0
11
12
13 def route(pattern, name=None):
14 def decorator(view_func):
15 global _creation_counter
16 _creation_counter += 1
17
18 # Make sure page has _routablepage_routes attribute
19 if not hasattr(view_func, '_routablepage_routes'):
20 view_func._routablepage_routes = []
21
22 # Add new route to view
23 view_func._routablepage_routes.append((
24 url(pattern, view_func, name=(name or view_func.__name__)),
25 _creation_counter,
26 ))
27
28 return view_func
29
30 return decorator
31
32
33 class RoutablePageMixin(object):
34 """
35 This class can be mixed in to a Page model, allowing extra routes to be
36 added to it.
37 """
38 #: Set this to a tuple of ``django.conf.urls.url`` objects.
39 subpage_urls = None
40
41 @classmethod
42 def get_subpage_urls(cls):
43 routes = []
44 for attr in dir(cls):
45 val = getattr(cls, attr)
46 if hasattr(val, '_routablepage_routes'):
47 routes.extend(val._routablepage_routes)
48
49 return tuple([
50 route[0]
51 for route in sorted(routes, key=lambda route: route[1])
52 ])
53
54 @classmethod
55 def get_resolver(cls):
56 if '_routablepage_urlresolver' not in cls.__dict__:
57 subpage_urls = cls.get_subpage_urls()
58 cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls)
59
60 return cls._routablepage_urlresolver
61
62 def reverse_subpage(self, name, args=None, kwargs=None):
63 """
64 This method takes a route name/arguments and returns a URL path.
65 """
66 args = args or []
67 kwargs = kwargs or {}
68
69 return self.get_resolver().reverse(name, *args, **kwargs)
70
71 def resolve_subpage(self, path):
72 """
73 This method takes a URL path and finds the view to call.
74 """
75 view, args, kwargs = self.get_resolver().resolve(path)
76
77 # Bind the method
78 view = view.__get__(self, type(self))
79
80 return view, args, kwargs
81
82 def route(self, request, path_components):
83 """
84 This hooks the subpage URLs into Wagtail's routing.
85 """
86 if self.live:
87 try:
88 path = '/'
89 if path_components:
90 path += '/'.join(path_components) + '/'
91
92 view, args, kwargs = self.resolve_subpage(path)
93 return RouteResult(self, args=(view, args, kwargs))
94 except Http404:
95 pass
96
97 return super(RoutablePageMixin, self).route(request, path_components)
98
99 def serve(self, request, view, args, kwargs):
100 return view(request, *args, **kwargs)
101
102 def serve_preview(self, request, mode_name):
103 view, args, kwargs = self.resolve_subpage('/')
104 return view(request, *args, **kwargs)
105
106
107 class RoutablePage(RoutablePageMixin, Page):
108 """
109 This class extends Page by adding methods which allows extra routes to be
110 added to it.
111 """
112
113 class Meta:
114 abstract = True
115
[end of wagtail/contrib/wagtailroutablepage/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/contrib/wagtailroutablepage/models.py b/wagtail/contrib/wagtailroutablepage/models.py
--- a/wagtail/contrib/wagtailroutablepage/models.py
+++ b/wagtail/contrib/wagtailroutablepage/models.py
@@ -96,7 +96,13 @@
return super(RoutablePageMixin, self).route(request, path_components)
- def serve(self, request, view, args, kwargs):
+ def serve(self, request, view=None, args=None, kwargs=None):
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = {}
+ if view is None:
+ return super(RoutablePageMixin, self).serve(request, *args, **kwargs)
return view(request, *args, **kwargs)
def serve_preview(self, request, mode_name):
| {"golden_diff": "diff --git a/wagtail/contrib/wagtailroutablepage/models.py b/wagtail/contrib/wagtailroutablepage/models.py\n--- a/wagtail/contrib/wagtailroutablepage/models.py\n+++ b/wagtail/contrib/wagtailroutablepage/models.py\n@@ -96,7 +96,13 @@\n \n return super(RoutablePageMixin, self).route(request, path_components)\n \n- def serve(self, request, view, args, kwargs):\n+ def serve(self, request, view=None, args=None, kwargs=None):\n+ if args is None:\n+ args = []\n+ if kwargs is None:\n+ kwargs = {}\n+ if view is None:\n+ return super(RoutablePageMixin, self).serve(request, *args, **kwargs)\n return view(request, *args, **kwargs)\n \n def serve_preview(self, request, mode_name):\n", "issue": "RoutablePage throws a TypeError if index route is not set\nRoutablePage should not raise TypeError if index route is not set. \n\n[route method](https://github.com/torchbox/wagtail/blob/master/wagtail/contrib/wagtailroutablepage/models.py#L97) properly calls parent class method if route is not found, but `serve` method has different signature, so it fails.\n\nThere is also a discussion of similar issue on Google groups:\nhttps://groups.google.com/forum/#!msg/wagtail/Q9FymA-MOxM/1UkQ0hRGAAAJ\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.conf.urls import url\nfrom django.core.urlresolvers import RegexURLResolver\nfrom django.http import Http404\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailcore.url_routing import RouteResult\n\n_creation_counter = 0\n\n\ndef route(pattern, name=None):\n def decorator(view_func):\n global _creation_counter\n _creation_counter += 1\n\n # Make sure page has _routablepage_routes attribute\n if not hasattr(view_func, '_routablepage_routes'):\n view_func._routablepage_routes = []\n\n # Add new route to view\n view_func._routablepage_routes.append((\n url(pattern, view_func, name=(name or view_func.__name__)),\n _creation_counter,\n ))\n\n return view_func\n\n return decorator\n\n\nclass RoutablePageMixin(object):\n \"\"\"\n This class can be mixed in to a Page model, allowing extra routes to be\n added to it.\n \"\"\"\n #: Set this to a tuple of ``django.conf.urls.url`` objects.\n subpage_urls = None\n\n @classmethod\n def get_subpage_urls(cls):\n routes = []\n for attr in dir(cls):\n val = getattr(cls, attr)\n if hasattr(val, '_routablepage_routes'):\n routes.extend(val._routablepage_routes)\n\n return tuple([\n route[0]\n for route in sorted(routes, key=lambda route: route[1])\n ])\n\n @classmethod\n def get_resolver(cls):\n if '_routablepage_urlresolver' not in cls.__dict__:\n subpage_urls = cls.get_subpage_urls()\n cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls)\n\n return cls._routablepage_urlresolver\n\n def reverse_subpage(self, name, args=None, kwargs=None):\n \"\"\"\n This method takes a route name/arguments and returns a URL path.\n \"\"\"\n args = args or []\n kwargs = kwargs or {}\n\n return self.get_resolver().reverse(name, *args, **kwargs)\n\n def resolve_subpage(self, path):\n \"\"\"\n This method takes a URL path and finds the view to call.\n \"\"\"\n view, args, kwargs = self.get_resolver().resolve(path)\n\n # Bind the method\n view = view.__get__(self, type(self))\n\n return view, args, kwargs\n\n def route(self, request, path_components):\n \"\"\"\n This hooks the subpage URLs into Wagtail's routing.\n \"\"\"\n if self.live:\n try:\n path = '/'\n if path_components:\n path += '/'.join(path_components) + '/'\n\n view, args, kwargs = 
self.resolve_subpage(path)\n return RouteResult(self, args=(view, args, kwargs))\n except Http404:\n pass\n\n return super(RoutablePageMixin, self).route(request, path_components)\n\n def serve(self, request, view, args, kwargs):\n return view(request, *args, **kwargs)\n\n def serve_preview(self, request, mode_name):\n view, args, kwargs = self.resolve_subpage('/')\n return view(request, *args, **kwargs)\n\n\nclass RoutablePage(RoutablePageMixin, Page):\n \"\"\"\n This class extends Page by adding methods which allows extra routes to be\n added to it.\n \"\"\"\n\n class Meta:\n abstract = True\n", "path": "wagtail/contrib/wagtailroutablepage/models.py"}]} | 1,682 | 206 |
gh_patches_debug_15119 | rasdani/github-patches | git_diff | pyro-ppl__pyro-734 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DMM Example Model and guide dims disagree when IAF is applied
When the number of IAF is non zero, the latent dimension changes to the same value as the batch size.
</issue>
<code>
[start of pyro/distributions/transformed_distribution.py]
1 from __future__ import absolute_import, division, print_function
2
3 import numbers
4 from abc import ABCMeta, abstractmethod
5
6 import torch
7 import torch.nn as nn
8 from six import add_metaclass
9 from torch.autograd import Variable
10
11 from pyro.distributions.distribution import Distribution
12 from pyro.distributions.util import copy_docs_from
13 from pyro.nn import AutoRegressiveNN
14
15
16 @copy_docs_from(Distribution)
17 class TransformedDistribution(Distribution):
18 """
19 Transforms the base distribution by applying a sequence of `Bijector`s to it.
20 This results in a scorable distribution (i.e. it has a `log_pdf()` method).
21
22 :param base_distribution: a (continuous) base distribution; samples from this distribution
23 are passed through the sequence of `Bijector`s to yield a sample from the
24 `TransformedDistribution`
25 :type base_distribution: pyro.distribution.Distribution
26 :param bijectors: either a single Bijector or a sequence of Bijectors wrapped in a nn.ModuleList
27 :returns: the transformed distribution
28 """
29
30 def __init__(self, base_distribution, bijectors, *args, **kwargs):
31 super(TransformedDistribution, self).__init__(*args, **kwargs)
32 self.reparameterized = base_distribution.reparameterized
33 self.base_dist = base_distribution
34 if isinstance(bijectors, Bijector):
35 self.bijectors = nn.ModuleList([bijectors])
36 elif isinstance(bijectors, nn.ModuleList):
37 for bijector in bijectors:
38 assert isinstance(bijector, Bijector), \
39 "bijectors must be a Bijector or a nn.ModuleList of Bijectors"
40 self.bijectors = bijectors
41
42 def sample(self, *args, **kwargs):
43 """
44 :returns: a sample y
45 :rtype: torch.autograd.Variable
46
47 Sample from base distribution and pass through bijector(s)
48 """
49 x = self.base_dist.sample(*args, **kwargs)
50 next_input = x
51 for bijector in self.bijectors:
52 y = bijector(next_input)
53 if bijector.add_inverse_to_cache:
54 bijector._add_intermediate_to_cache(next_input, y, 'x')
55 next_input = y
56 return next_input
57
58 def batch_shape(self, x=None, *args, **kwargs):
59 return self.base_dist.batch_shape(*args, **kwargs)
60
61 def event_shape(self, *args, **kwargs):
62 return self.base_dist.event_shape(*args, **kwargs)
63
64 def log_prob(self, y, *args, **kwargs):
65 """
66 :param y: a value sampled from the transformed distribution
67 :type y: torch.autograd.Variable
68
69 :returns: the score (the log pdf) of y
70 :rtype: torch.autograd.Variable
71
72 Scores the sample by inverting the bijector(s) and computing the score using the score
73 of the base distribution and the log det jacobian
74 """
75 value = y
76 log_det_jacobian = 0.0
77 for bijector in reversed(self.bijectors):
78 log_det_jacobian += bijector.batch_log_det_jacobian(value, *args, **kwargs)
79 value = bijector.inverse(value)
80 base_log_pdf = self.base_dist.log_prob(value, *args, **kwargs)
81 if not isinstance(log_det_jacobian, numbers.Number):
82 log_det_jacobian = log_det_jacobian.contiguous().view(*base_log_pdf.size())
83 assert log_det_jacobian.size() == base_log_pdf.size(), \
84 'Invalid batch_log_det_jacobian().size():\nexpected {}\nactual {}'.format(
85 base_log_pdf.size(), log_det_jacobian.size())
86 return base_log_pdf - log_det_jacobian
87
88
89 @add_metaclass(ABCMeta)
90 class Bijector(nn.Module):
91 """
92 Abstract class `Bijector`. `Bijector` are bijective transformations with computable
93 log det jacobians. They are meant for use in `TransformedDistribution`.
94 """
95
96 def __init__(self, *args, **kwargs):
97 super(Bijector, self).__init__(*args, **kwargs)
98 self.add_inverse_to_cache = False
99
100 @abstractmethod
101 def __call__(self, *args, **kwargs):
102 """
103 Virtual forward method
104
105 Invokes the bijection x=>y
106 """
107 raise NotImplementedError()
108
109 @abstractmethod
110 def inverse(self, *args, **kwargs):
111 """
112 Virtual inverse method
113
114 Inverts the bijection y => x.
115 """
116 raise NotImplementedError()
117
118 def log_det_jacobian(self, *args, **kwargs):
119 """
120 Default logdet jacobian method.
121
122 Computes the log det jacobian `|dy/dx|`
123 """
124 return self.batch_log_det_jacobian(*args, **kwargs).sum()
125
126 def batch_log_det_jacobian(self, *args, **kwargs):
127 """
128 Virtual elementwise logdet jacobian method.
129
130 Computes the log abs det jacobian `|dy/dx|`
131 """
132 raise NotImplementedError()
133
134
135 class InverseAutoregressiveFlow(Bijector):
136 """
137 An implementation of an Inverse Autoregressive Flow. Together with the `TransformedDistribution` this
138 provides a way to create richer variational approximations.
139
140 Example usage::
141
142 >>> base_dist = Normal(...)
143 >>> iaf = InverseAutoregressiveFlow(...)
144 >>> pyro.module("my_iaf", iaf)
145 >>> iaf_dist = TransformedDistribution(base_dist, iaf)
146
147 Note that this implementation is only meant to be used in settings where the inverse of the Bijector
148 is never explicitly computed (rather the result is cached from the forward call). In the context of
149 variational inference, this means that the InverseAutoregressiveFlow should only be used in the guide,
150 i.e. in the variational distribution. In other contexts the inverse could in principle be computed but
151 this would be a (potentially) costly computation that scales with the dimension of the input (and in
152 any case support for this is not included in this implementation).
153
154 :param input_dim: dimension of input
155 :type input_dim: int
156 :param hidden_dim: hidden dimension (number of hidden units)
157 :type hidden_dim: int
158 :param sigmoid_bias: bias on the hidden units fed into the sigmoid; default=`2.0`
159 :type sigmoid_bias: float
160 :param permutation: whether the order of the inputs should be permuted (by default the conditional
161 dependence structure of the autoregression follows the sequential order)
162 :type permutation: bool
163
164 References:
165
166 1. Improving Variational Inference with Inverse Autoregressive Flow [arXiv:1606.04934]
167 Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling
168
169 2. Variational Inference with Normalizing Flows [arXiv:1505.05770]
170 Danilo Jimenez Rezende, Shakir Mohamed
171
172 3. MADE: Masked Autoencoder for Distribution Estimation [arXiv:1502.03509]
173 Mathieu Germain, Karol Gregor, Iain Murray, Hugo Larochelle
174 """
175
176 def __init__(self, input_dim, hidden_dim, sigmoid_bias=2.0, permutation=None):
177 super(InverseAutoregressiveFlow, self).__init__()
178 self.input_dim = input_dim
179 self.hidden_dim = hidden_dim
180 self.arn = AutoRegressiveNN(input_dim, hidden_dim, output_dim_multiplier=2, permutation=permutation)
181 self.sigmoid = nn.Sigmoid()
182 self.sigmoid_bias = Variable(torch.Tensor([sigmoid_bias]))
183 self._intermediates_cache = {}
184 self.add_inverse_to_cache = True
185
186 def get_arn(self):
187 """
188 :rtype: pyro.nn.AutoRegressiveNN
189
190 Return the AutoRegressiveNN associated with the InverseAutoregressiveFlow
191 """
192 return self.arn
193
194 def __call__(self, x, *args, **kwargs):
195 """
196 :param x: the input into the bijection
197 :type x: torch.autograd.Variable
198
199 Invokes the bijection x=>y; in the prototypical context of a TransformedDistribution `x` is a
200 sample from the base distribution (or the output of a previous flow)
201 """
202 hidden = self.arn(x)
203 sigma = self.sigmoid(hidden[:, 0:self.input_dim] + self.sigmoid_bias.type_as(hidden))
204 mean = hidden[:, self.input_dim:]
205 y = sigma * x + (Variable(torch.ones(sigma.size())).type_as(sigma) - sigma) * mean
206 self._add_intermediate_to_cache(sigma, y, 'sigma')
207 return y
208
209 def inverse(self, y, *args, **kwargs):
210 """
211 :param y: the output of the bijection
212 :type y: torch.autograd.Variable
213
214 Inverts y => x. As noted above, this implementation is incapable of inverting arbitrary values
215 `y`; rather it assumes `y` is the result of a previously computed application of the bijector
216 to some `x` (which was cached on the forward call)
217 """
218 if (y, 'x') in self._intermediates_cache:
219 x = self._intermediates_cache.pop((y, 'x'))
220 return x
221 else:
222 raise KeyError("Bijector InverseAutoregressiveFlow expected to find" +
223 "key in intermediates cache but didn't")
224
225 def _add_intermediate_to_cache(self, intermediate, y, name):
226 """
227 Internal function used to cache intermediate results computed during the forward call
228 """
229 assert((y, name) not in self._intermediates_cache),\
230 "key collision in _add_intermediate_to_cache"
231 self._intermediates_cache[(y, name)] = intermediate
232
233 def batch_log_det_jacobian(self, y, *args, **kwargs):
234 """
235 Calculates the elementwise determinant of the log jacobian
236 """
237 if (y, 'sigma') in self._intermediates_cache:
238 sigma = self._intermediates_cache.pop((y, 'sigma'))
239 else:
240 raise KeyError("Bijector InverseAutoregressiveFlow expected to find" +
241 "key in intermediates cache but didn't")
242 log_sigma = torch.log(sigma)
243 if 'log_pdf_mask' in kwargs:
244 log_sigma = log_sigma * kwargs['log_pdf_mask']
245 return log_sigma.sum(-1)
246
[end of pyro/distributions/transformed_distribution.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyro/distributions/transformed_distribution.py b/pyro/distributions/transformed_distribution.py
--- a/pyro/distributions/transformed_distribution.py
+++ b/pyro/distributions/transformed_distribution.py
@@ -55,7 +55,10 @@
next_input = y
return next_input
- def batch_shape(self, x=None, *args, **kwargs):
+ def shape(self, *args, **kwargs):
+ return self.base_dist.shape(*args, **kwargs)
+
+ def batch_shape(self, *args, **kwargs):
return self.base_dist.batch_shape(*args, **kwargs)
def event_shape(self, *args, **kwargs):
@@ -242,4 +245,4 @@
log_sigma = torch.log(sigma)
if 'log_pdf_mask' in kwargs:
log_sigma = log_sigma * kwargs['log_pdf_mask']
- return log_sigma.sum(-1)
+ return log_sigma
| {"golden_diff": "diff --git a/pyro/distributions/transformed_distribution.py b/pyro/distributions/transformed_distribution.py\n--- a/pyro/distributions/transformed_distribution.py\n+++ b/pyro/distributions/transformed_distribution.py\n@@ -55,7 +55,10 @@\n next_input = y\n return next_input\n \n- def batch_shape(self, x=None, *args, **kwargs):\n+ def shape(self, *args, **kwargs):\n+ return self.base_dist.shape(*args, **kwargs)\n+\n+ def batch_shape(self, *args, **kwargs):\n return self.base_dist.batch_shape(*args, **kwargs)\n \n def event_shape(self, *args, **kwargs):\n@@ -242,4 +245,4 @@\n log_sigma = torch.log(sigma)\n if 'log_pdf_mask' in kwargs:\n log_sigma = log_sigma * kwargs['log_pdf_mask']\n- return log_sigma.sum(-1)\n+ return log_sigma\n", "issue": "DMM Example Model and guide dims disagree when IAF is applied\nWhen the number of IAF is non zero, the latent dimension changes to the same value as the batch size.\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport numbers\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nimport torch.nn as nn\nfrom six import add_metaclass\nfrom torch.autograd import Variable\n\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.distributions.util import copy_docs_from\nfrom pyro.nn import AutoRegressiveNN\n\n\n@copy_docs_from(Distribution)\nclass TransformedDistribution(Distribution):\n \"\"\"\n Transforms the base distribution by applying a sequence of `Bijector`s to it.\n This results in a scorable distribution (i.e. it has a `log_pdf()` method).\n\n :param base_distribution: a (continuous) base distribution; samples from this distribution\n are passed through the sequence of `Bijector`s to yield a sample from the\n `TransformedDistribution`\n :type base_distribution: pyro.distribution.Distribution\n :param bijectors: either a single Bijector or a sequence of Bijectors wrapped in a nn.ModuleList\n :returns: the transformed distribution\n \"\"\"\n\n def __init__(self, base_distribution, bijectors, *args, **kwargs):\n super(TransformedDistribution, self).__init__(*args, **kwargs)\n self.reparameterized = base_distribution.reparameterized\n self.base_dist = base_distribution\n if isinstance(bijectors, Bijector):\n self.bijectors = nn.ModuleList([bijectors])\n elif isinstance(bijectors, nn.ModuleList):\n for bijector in bijectors:\n assert isinstance(bijector, Bijector), \\\n \"bijectors must be a Bijector or a nn.ModuleList of Bijectors\"\n self.bijectors = bijectors\n\n def sample(self, *args, **kwargs):\n \"\"\"\n :returns: a sample y\n :rtype: torch.autograd.Variable\n\n Sample from base distribution and pass through bijector(s)\n \"\"\"\n x = self.base_dist.sample(*args, **kwargs)\n next_input = x\n for bijector in self.bijectors:\n y = bijector(next_input)\n if bijector.add_inverse_to_cache:\n bijector._add_intermediate_to_cache(next_input, y, 'x')\n next_input = y\n return next_input\n\n def batch_shape(self, x=None, *args, **kwargs):\n return self.base_dist.batch_shape(*args, **kwargs)\n\n def event_shape(self, *args, **kwargs):\n return self.base_dist.event_shape(*args, **kwargs)\n\n def log_prob(self, y, *args, **kwargs):\n \"\"\"\n :param y: a value sampled from the transformed distribution\n :type y: torch.autograd.Variable\n\n :returns: the score (the log pdf) of y\n :rtype: torch.autograd.Variable\n\n Scores the sample by inverting the bijector(s) and computing the score using the score\n of the base distribution and the log det jacobian\n \"\"\"\n value 
= y\n log_det_jacobian = 0.0\n for bijector in reversed(self.bijectors):\n log_det_jacobian += bijector.batch_log_det_jacobian(value, *args, **kwargs)\n value = bijector.inverse(value)\n base_log_pdf = self.base_dist.log_prob(value, *args, **kwargs)\n if not isinstance(log_det_jacobian, numbers.Number):\n log_det_jacobian = log_det_jacobian.contiguous().view(*base_log_pdf.size())\n assert log_det_jacobian.size() == base_log_pdf.size(), \\\n 'Invalid batch_log_det_jacobian().size():\\nexpected {}\\nactual {}'.format(\n base_log_pdf.size(), log_det_jacobian.size())\n return base_log_pdf - log_det_jacobian\n\n\n@add_metaclass(ABCMeta)\nclass Bijector(nn.Module):\n \"\"\"\n Abstract class `Bijector`. `Bijector` are bijective transformations with computable\n log det jacobians. They are meant for use in `TransformedDistribution`.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Bijector, self).__init__(*args, **kwargs)\n self.add_inverse_to_cache = False\n\n @abstractmethod\n def __call__(self, *args, **kwargs):\n \"\"\"\n Virtual forward method\n\n Invokes the bijection x=>y\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def inverse(self, *args, **kwargs):\n \"\"\"\n Virtual inverse method\n\n Inverts the bijection y => x.\n \"\"\"\n raise NotImplementedError()\n\n def log_det_jacobian(self, *args, **kwargs):\n \"\"\"\n Default logdet jacobian method.\n\n Computes the log det jacobian `|dy/dx|`\n \"\"\"\n return self.batch_log_det_jacobian(*args, **kwargs).sum()\n\n def batch_log_det_jacobian(self, *args, **kwargs):\n \"\"\"\n Virtual elementwise logdet jacobian method.\n\n Computes the log abs det jacobian `|dy/dx|`\n \"\"\"\n raise NotImplementedError()\n\n\nclass InverseAutoregressiveFlow(Bijector):\n \"\"\"\n An implementation of an Inverse Autoregressive Flow. Together with the `TransformedDistribution` this\n provides a way to create richer variational approximations.\n\n Example usage::\n\n >>> base_dist = Normal(...)\n >>> iaf = InverseAutoregressiveFlow(...)\n >>> pyro.module(\"my_iaf\", iaf)\n >>> iaf_dist = TransformedDistribution(base_dist, iaf)\n\n Note that this implementation is only meant to be used in settings where the inverse of the Bijector\n is never explicitly computed (rather the result is cached from the forward call). In the context of\n variational inference, this means that the InverseAutoregressiveFlow should only be used in the guide,\n i.e. in the variational distribution. In other contexts the inverse could in principle be computed but\n this would be a (potentially) costly computation that scales with the dimension of the input (and in\n any case support for this is not included in this implementation).\n\n :param input_dim: dimension of input\n :type input_dim: int\n :param hidden_dim: hidden dimension (number of hidden units)\n :type hidden_dim: int\n :param sigmoid_bias: bias on the hidden units fed into the sigmoid; default=`2.0`\n :type sigmoid_bias: float\n :param permutation: whether the order of the inputs should be permuted (by default the conditional\n dependence structure of the autoregression follows the sequential order)\n :type permutation: bool\n\n References:\n\n 1. Improving Variational Inference with Inverse Autoregressive Flow [arXiv:1606.04934]\n Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling\n\n 2. Variational Inference with Normalizing Flows [arXiv:1505.05770]\n Danilo Jimenez Rezende, Shakir Mohamed\n\n 3. 
MADE: Masked Autoencoder for Distribution Estimation [arXiv:1502.03509]\n Mathieu Germain, Karol Gregor, Iain Murray, Hugo Larochelle\n \"\"\"\n\n def __init__(self, input_dim, hidden_dim, sigmoid_bias=2.0, permutation=None):\n super(InverseAutoregressiveFlow, self).__init__()\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.arn = AutoRegressiveNN(input_dim, hidden_dim, output_dim_multiplier=2, permutation=permutation)\n self.sigmoid = nn.Sigmoid()\n self.sigmoid_bias = Variable(torch.Tensor([sigmoid_bias]))\n self._intermediates_cache = {}\n self.add_inverse_to_cache = True\n\n def get_arn(self):\n \"\"\"\n :rtype: pyro.nn.AutoRegressiveNN\n\n Return the AutoRegressiveNN associated with the InverseAutoregressiveFlow\n \"\"\"\n return self.arn\n\n def __call__(self, x, *args, **kwargs):\n \"\"\"\n :param x: the input into the bijection\n :type x: torch.autograd.Variable\n\n Invokes the bijection x=>y; in the prototypical context of a TransformedDistribution `x` is a\n sample from the base distribution (or the output of a previous flow)\n \"\"\"\n hidden = self.arn(x)\n sigma = self.sigmoid(hidden[:, 0:self.input_dim] + self.sigmoid_bias.type_as(hidden))\n mean = hidden[:, self.input_dim:]\n y = sigma * x + (Variable(torch.ones(sigma.size())).type_as(sigma) - sigma) * mean\n self._add_intermediate_to_cache(sigma, y, 'sigma')\n return y\n\n def inverse(self, y, *args, **kwargs):\n \"\"\"\n :param y: the output of the bijection\n :type y: torch.autograd.Variable\n\n Inverts y => x. As noted above, this implementation is incapable of inverting arbitrary values\n `y`; rather it assumes `y` is the result of a previously computed application of the bijector\n to some `x` (which was cached on the forward call)\n \"\"\"\n if (y, 'x') in self._intermediates_cache:\n x = self._intermediates_cache.pop((y, 'x'))\n return x\n else:\n raise KeyError(\"Bijector InverseAutoregressiveFlow expected to find\" +\n \"key in intermediates cache but didn't\")\n\n def _add_intermediate_to_cache(self, intermediate, y, name):\n \"\"\"\n Internal function used to cache intermediate results computed during the forward call\n \"\"\"\n assert((y, name) not in self._intermediates_cache),\\\n \"key collision in _add_intermediate_to_cache\"\n self._intermediates_cache[(y, name)] = intermediate\n\n def batch_log_det_jacobian(self, y, *args, **kwargs):\n \"\"\"\n Calculates the elementwise determinant of the log jacobian\n \"\"\"\n if (y, 'sigma') in self._intermediates_cache:\n sigma = self._intermediates_cache.pop((y, 'sigma'))\n else:\n raise KeyError(\"Bijector InverseAutoregressiveFlow expected to find\" +\n \"key in intermediates cache but didn't\")\n log_sigma = torch.log(sigma)\n if 'log_pdf_mask' in kwargs:\n log_sigma = log_sigma * kwargs['log_pdf_mask']\n return log_sigma.sum(-1)\n", "path": "pyro/distributions/transformed_distribution.py"}]} | 3,554 | 213 |
gh_patches_debug_9881 | rasdani/github-patches | git_diff | Kinto__kinto-1862 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Updated Error on Version File Missing
4XX errors are client errors. In that case, if the file is not on the server, the client is not guilty
Raise error instead of returning 404 on __version__ if version file is missing
https://github.com/Kinto/kinto/blame/master/kinto/core/views/version.py#L49
suggested by @peterbe
4XX errors are client errors. In that case, if the file is not on the server, the client is not guilty ;)
</issue>
<code>
[start of kinto/core/views/version.py]
1 import json
2 import os
3
4 import colander
5 from pyramid import httpexceptions
6 from pyramid.security import NO_PERMISSION_REQUIRED
7 from kinto.core import Service
8
9 HERE = os.path.dirname(__file__)
10 ORIGIN = os.path.dirname(HERE)
11
12
13 class VersionResponseSchema(colander.MappingSchema):
14 body = colander.SchemaNode(colander.Mapping(unknown="preserve"))
15
16
17 version_response_schemas = {
18 "200": VersionResponseSchema(description="Return the running Instance version information.")
19 }
20
21
22 version = Service(name="version", path="/__version__", description="Version")
23
24
25 @version.get(
26 permission=NO_PERMISSION_REQUIRED,
27 tags=["Utilities"],
28 operation_id="__version__",
29 response_schemas=version_response_schemas,
30 )
31 def version_view(request):
32 try:
33 return version_view.__json__
34 except AttributeError:
35 pass
36
37 location = request.registry.settings["version_json_path"]
38 files = [
39 location, # Default is current working dir.
40 os.path.join(ORIGIN, "version.json"), # Relative to the package root.
41 os.path.join(HERE, "version.json"), # Relative to this file.
42 ]
43 for version_file in files:
44 if os.path.exists(version_file):
45 with open(version_file) as f:
46 version_view.__json__ = json.load(f)
47 return version_view.__json__ # First one wins.
48
49 raise httpexceptions.HTTPNotFound()
50
[end of kinto/core/views/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/core/views/version.py b/kinto/core/views/version.py
--- a/kinto/core/views/version.py
+++ b/kinto/core/views/version.py
@@ -2,7 +2,6 @@
import os
import colander
-from pyramid import httpexceptions
from pyramid.security import NO_PERMISSION_REQUIRED
from kinto.core import Service
@@ -46,4 +45,4 @@
version_view.__json__ = json.load(f)
return version_view.__json__ # First one wins.
- raise httpexceptions.HTTPNotFound()
+ raise FileNotFoundError("Version file missing from {}".format(files.join(",")))
| {"golden_diff": "diff --git a/kinto/core/views/version.py b/kinto/core/views/version.py\n--- a/kinto/core/views/version.py\n+++ b/kinto/core/views/version.py\n@@ -2,7 +2,6 @@\n import os\n \n import colander\n-from pyramid import httpexceptions\n from pyramid.security import NO_PERMISSION_REQUIRED\n from kinto.core import Service\n \n@@ -46,4 +45,4 @@\n version_view.__json__ = json.load(f)\n return version_view.__json__ # First one wins.\n \n- raise httpexceptions.HTTPNotFound()\n+ raise FileNotFoundError(\"Version file missing from {}\".format(files.join(\",\")))\n", "issue": "Updated Error on Version File Missing\n4XX errors are client errors. In that case, if the file is not on the server, the client is not guilty\nRaise error instead of returning 404 on __version__ if version file is missing\nhttps://github.com/Kinto/kinto/blame/master/kinto/core/views/version.py#L49\r\n\r\nsuggested by @peterbe \r\n\r\n4XX errors are client errors. In that case, if the file is not on the server, the client is not guilty ;)\n", "before_files": [{"content": "import json\nimport os\n\nimport colander\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom kinto.core import Service\n\nHERE = os.path.dirname(__file__)\nORIGIN = os.path.dirname(HERE)\n\n\nclass VersionResponseSchema(colander.MappingSchema):\n body = colander.SchemaNode(colander.Mapping(unknown=\"preserve\"))\n\n\nversion_response_schemas = {\n \"200\": VersionResponseSchema(description=\"Return the running Instance version information.\")\n}\n\n\nversion = Service(name=\"version\", path=\"/__version__\", description=\"Version\")\n\n\[email protected](\n permission=NO_PERMISSION_REQUIRED,\n tags=[\"Utilities\"],\n operation_id=\"__version__\",\n response_schemas=version_response_schemas,\n)\ndef version_view(request):\n try:\n return version_view.__json__\n except AttributeError:\n pass\n\n location = request.registry.settings[\"version_json_path\"]\n files = [\n location, # Default is current working dir.\n os.path.join(ORIGIN, \"version.json\"), # Relative to the package root.\n os.path.join(HERE, \"version.json\"), # Relative to this file.\n ]\n for version_file in files:\n if os.path.exists(version_file):\n with open(version_file) as f:\n version_view.__json__ = json.load(f)\n return version_view.__json__ # First one wins.\n\n raise httpexceptions.HTTPNotFound()\n", "path": "kinto/core/views/version.py"}]} | 1,039 | 134 |
gh_patches_debug_8651 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1179 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Run hooks on files in specific dir, rather than using `--all-files`
I am able to successfully use `pre-commit run --all-files` to run hooks on all my files.
Now, I'm trying to run the hooks just on files in a particular directory. I think I'm not understanding the docs and I can't find an example to work from.
Here's what the docs say:
> `--files [FILES [FILES ...]]`: specific filenames to run hooks on.
I've tried the following variations:
`pre-commit run --files web/modules/custom`
`pre-commit run --files web/modules/custom/*`
`pre-commit run --files [web/modules/custom]`
`pre-commit run --files [web/modules/custom/*]`
`pre-commit run --files [FILES [web/modules/custom]`
`pre-commit run --files [FILES [web/modules/custom/*]`
I feel really dumb having to ask, but can someone please point me in the right direction?
</issue>
<code>
[start of pre_commit/commands/run.py]
1 from __future__ import unicode_literals
2
3 import logging
4 import os
5 import re
6 import subprocess
7 import sys
8
9 from identify.identify import tags_from_path
10
11 from pre_commit import color
12 from pre_commit import git
13 from pre_commit import output
14 from pre_commit.clientlib import load_config
15 from pre_commit.output import get_hook_message
16 from pre_commit.repository import all_hooks
17 from pre_commit.repository import install_hook_envs
18 from pre_commit.staged_files_only import staged_files_only
19 from pre_commit.util import cmd_output_b
20 from pre_commit.util import noop_context
21
22
23 logger = logging.getLogger('pre_commit')
24
25
26 def filter_by_include_exclude(names, include, exclude):
27 include_re, exclude_re = re.compile(include), re.compile(exclude)
28 return [
29 filename for filename in names
30 if include_re.search(filename)
31 if not exclude_re.search(filename)
32 ]
33
34
35 class Classifier(object):
36 def __init__(self, filenames):
37 self.filenames = [f for f in filenames if os.path.lexists(f)]
38 self._types_cache = {}
39
40 def _types_for_file(self, filename):
41 try:
42 return self._types_cache[filename]
43 except KeyError:
44 ret = self._types_cache[filename] = tags_from_path(filename)
45 return ret
46
47 def by_types(self, names, types, exclude_types):
48 types, exclude_types = frozenset(types), frozenset(exclude_types)
49 ret = []
50 for filename in names:
51 tags = self._types_for_file(filename)
52 if tags >= types and not tags & exclude_types:
53 ret.append(filename)
54 return ret
55
56 def filenames_for_hook(self, hook):
57 names = self.filenames
58 names = filter_by_include_exclude(names, hook.files, hook.exclude)
59 names = self.by_types(names, hook.types, hook.exclude_types)
60 return names
61
62
63 def _get_skips(environ):
64 skips = environ.get('SKIP', '')
65 return {skip.strip() for skip in skips.split(',') if skip.strip()}
66
67
68 def _hook_msg_start(hook, verbose):
69 return '{}{}'.format('[{}] '.format(hook.id) if verbose else '', hook.name)
70
71
72 SKIPPED = 'Skipped'
73 NO_FILES = '(no files to check)'
74
75
76 def _run_single_hook(classifier, hook, args, skips, cols, use_color):
77 filenames = classifier.filenames_for_hook(hook)
78
79 if hook.language == 'pcre':
80 logger.warning(
81 '`{}` (from {}) uses the deprecated pcre language.\n'
82 'The pcre language is scheduled for removal in pre-commit 2.x.\n'
83 'The pygrep language is a more portable (and usually drop-in) '
84 'replacement.'.format(hook.id, hook.src),
85 )
86
87 if hook.id in skips or hook.alias in skips:
88 output.write(
89 get_hook_message(
90 _hook_msg_start(hook, args.verbose),
91 end_msg=SKIPPED,
92 end_color=color.YELLOW,
93 use_color=args.color,
94 cols=cols,
95 ),
96 )
97 return 0
98 elif not filenames and not hook.always_run:
99 output.write(
100 get_hook_message(
101 _hook_msg_start(hook, args.verbose),
102 postfix=NO_FILES,
103 end_msg=SKIPPED,
104 end_color=color.TURQUOISE,
105 use_color=args.color,
106 cols=cols,
107 ),
108 )
109 return 0
110
111 # Print the hook and the dots first in case the hook takes hella long to
112 # run.
113 output.write(
114 get_hook_message(
115 _hook_msg_start(hook, args.verbose), end_len=6, cols=cols,
116 ),
117 )
118 sys.stdout.flush()
119
120 diff_before = cmd_output_b('git', 'diff', '--no-ext-diff', retcode=None)
121 filenames = tuple(filenames) if hook.pass_filenames else ()
122 retcode, out = hook.run(filenames, use_color)
123 diff_after = cmd_output_b('git', 'diff', '--no-ext-diff', retcode=None)
124
125 file_modifications = diff_before != diff_after
126
127 # If the hook makes changes, fail the commit
128 if file_modifications:
129 retcode = 1
130
131 if retcode:
132 retcode = 1
133 print_color = color.RED
134 pass_fail = 'Failed'
135 else:
136 retcode = 0
137 print_color = color.GREEN
138 pass_fail = 'Passed'
139
140 output.write_line(color.format_color(pass_fail, print_color, args.color))
141
142 if (
143 (out or file_modifications) and
144 (retcode or args.verbose or hook.verbose)
145 ):
146 output.write_line('hookid: {}\n'.format(hook.id))
147
148 # Print a message if failing due to file modifications
149 if file_modifications:
150 output.write('Files were modified by this hook.')
151
152 if out:
153 output.write_line(' Additional output:')
154
155 output.write_line()
156
157 if out.strip():
158 output.write_line(out.strip(), logfile_name=hook.log_file)
159 output.write_line()
160
161 return retcode
162
163
164 def _compute_cols(hooks, verbose):
165 """Compute the number of columns to display hook messages. The widest
166 that will be displayed is in the no files skipped case:
167
168 Hook name...(no files to check) Skipped
169
170 or in the verbose case
171
172 Hook name [hookid]...(no files to check) Skipped
173 """
174 if hooks:
175 name_len = max(len(_hook_msg_start(hook, verbose)) for hook in hooks)
176 else:
177 name_len = 0
178
179 cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)
180 return max(cols, 80)
181
182
183 def _all_filenames(args):
184 if args.origin and args.source:
185 return git.get_changed_files(args.origin, args.source)
186 elif args.hook_stage in {'prepare-commit-msg', 'commit-msg'}:
187 return (args.commit_msg_filename,)
188 elif args.files:
189 return args.files
190 elif args.all_files:
191 return git.get_all_files()
192 elif git.is_in_merge_conflict():
193 return git.get_conflicted_files()
194 else:
195 return git.get_staged_files()
196
197
198 def _run_hooks(config, hooks, args, environ):
199 """Actually run the hooks."""
200 skips = _get_skips(environ)
201 cols = _compute_cols(hooks, args.verbose)
202 filenames = _all_filenames(args)
203 filenames = filter_by_include_exclude(filenames, '', config['exclude'])
204 classifier = Classifier(filenames)
205 retval = 0
206 for hook in hooks:
207 retval |= _run_single_hook(
208 classifier, hook, args, skips, cols, args.color,
209 )
210 if retval and config['fail_fast']:
211 break
212 if retval and args.show_diff_on_failure and git.has_diff():
213 if args.all_files:
214 output.write_line(
215 'pre-commit hook(s) made changes.\n'
216 'If you are seeing this message in CI, '
217 'reproduce locally with: `pre-commit run --all-files`.\n'
218 'To run `pre-commit` as part of git workflow, use '
219 '`pre-commit install`.',
220 )
221 output.write_line('All changes made by hooks:')
222 # args.color is a boolean.
223 # See user_color function in color.py
224 subprocess.call((
225 'git', '--no-pager', 'diff', '--no-ext-diff',
226 '--color={}'.format({True: 'always', False: 'never'}[args.color]),
227 ))
228
229 return retval
230
231
232 def _has_unmerged_paths():
233 _, stdout, _ = cmd_output_b('git', 'ls-files', '--unmerged')
234 return bool(stdout.strip())
235
236
237 def _has_unstaged_config(config_file):
238 retcode, _, _ = cmd_output_b(
239 'git', 'diff', '--no-ext-diff', '--exit-code', config_file,
240 retcode=None,
241 )
242 # be explicit, other git errors don't mean it has an unstaged config.
243 return retcode == 1
244
245
246 def run(config_file, store, args, environ=os.environ):
247 no_stash = args.all_files or bool(args.files)
248
249 # Check if we have unresolved merge conflict files and fail fast.
250 if _has_unmerged_paths():
251 logger.error('Unmerged files. Resolve before committing.')
252 return 1
253 if bool(args.source) != bool(args.origin):
254 logger.error('Specify both --origin and --source.')
255 return 1
256 if _has_unstaged_config(config_file) and not no_stash:
257 logger.error(
258 'Your pre-commit configuration is unstaged.\n'
259 '`git add {}` to fix this.'.format(config_file),
260 )
261 return 1
262
263 # Expose origin / source as environment variables for hooks to consume
264 if args.origin and args.source:
265 environ['PRE_COMMIT_ORIGIN'] = args.origin
266 environ['PRE_COMMIT_SOURCE'] = args.source
267
268 if no_stash:
269 ctx = noop_context()
270 else:
271 ctx = staged_files_only(store.directory)
272
273 with ctx:
274 config = load_config(config_file)
275 hooks = [
276 hook
277 for hook in all_hooks(config, store)
278 if not args.hook or hook.id == args.hook or hook.alias == args.hook
279 if args.hook_stage in hook.stages
280 ]
281
282 if args.hook and not hooks:
283 output.write_line(
284 'No hook with id `{}` in stage `{}`'.format(
285 args.hook, args.hook_stage,
286 ),
287 )
288 return 1
289
290 install_hook_envs(hooks, store)
291
292 return _run_hooks(config, hooks, args, environ)
293
[end of pre_commit/commands/run.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -34,6 +34,12 @@
class Classifier(object):
def __init__(self, filenames):
+ # on windows we normalize all filenames to use forward slashes
+ # this makes it easier to filter using the `files:` regex
+ # this also makes improperly quoted shell-based hooks work better
+ # see #1173
+ if os.altsep == '/' and os.sep == '\\':
+ filenames = (f.replace(os.sep, os.altsep) for f in filenames)
self.filenames = [f for f in filenames if os.path.lexists(f)]
self._types_cache = {}
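The hunk above normalizes Windows path separators before the include/exclude filtering runs, which is what lets directory-scoped `files:` patterns written with forward slashes keep matching. A minimal sketch of that effect, assuming a made-up Windows-style path and pattern and a trimmed copy of `filter_by_include_exclude` from the listing above:

```
import re

def filter_by_include_exclude(names, include, exclude):
    # trimmed copy of the helper shown in pre_commit/commands/run.py
    include_re, exclude_re = re.compile(include), re.compile(exclude)
    return [f for f in names if include_re.search(f) and not exclude_re.search(f)]

# a Windows-style path as it might come in from the command line (illustrative)
names = ['web\\modules\\custom\\foo.py']

# a files: pattern written with forward slashes, as configs usually are
print(filter_by_include_exclude(names, r'^web/modules/custom/', r'^$'))   # [] -> hook sees no files

# the patch replaces os.sep with os.altsep on Windows before this filtering happens
normalized = [f.replace('\\', '/') for f in names]
print(filter_by_include_exclude(normalized, r'^web/modules/custom/', r'^$'))  # ['web/modules/custom/foo.py']
```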
| {"golden_diff": "diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py\n--- a/pre_commit/commands/run.py\n+++ b/pre_commit/commands/run.py\n@@ -34,6 +34,12 @@\n \n class Classifier(object):\n def __init__(self, filenames):\n+ # on windows we normalize all filenames to use forward slashes\n+ # this makes it easier to filter using the `files:` regex\n+ # this also makes improperly quoted shell-based hooks work better\n+ # see #1173\n+ if os.altsep == '/' and os.sep == '\\\\':\n+ filenames = (f.replace(os.sep, os.altsep) for f in filenames)\n self.filenames = [f for f in filenames if os.path.lexists(f)]\n self._types_cache = {}\n", "issue": "Run hooks on files in specific dir, rather than using `--all-files`\nI am able to successfully use `pre-commit run --all-files` to run hooks on all my files.\r\n\r\nNow, I'm trying to run the hooks just on files in a particular directory. I think I'm not understanding the docs and I can't find an example to work from.\r\n\r\nHere's what the docs say:\r\n> `--files [FILES [FILES ...]]`: specific filenames to run hooks on.\r\n\r\nI've tried the following variations:\r\n`pre-commit run --files web/modules/custom`\r\n`pre-commit run --files web/modules/custom/*`\r\n`pre-commit run --files [web/modules/custom]`\r\n`pre-commit run --files [web/modules/custom/*]`\r\n`pre-commit run --files [FILES [web/modules/custom]`\r\n`pre-commit run --files [FILES [web/modules/custom/*]`\r\n\r\nI feel really dumb having to ask, but can someone please point me in the right direction?\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom identify.identify import tags_from_path\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef filter_by_include_exclude(names, include, exclude):\n include_re, exclude_re = re.compile(include), re.compile(exclude)\n return [\n filename for filename in names\n if include_re.search(filename)\n if not exclude_re.search(filename)\n ]\n\n\nclass Classifier(object):\n def __init__(self, filenames):\n self.filenames = [f for f in filenames if os.path.lexists(f)]\n self._types_cache = {}\n\n def _types_for_file(self, filename):\n try:\n return self._types_cache[filename]\n except KeyError:\n ret = self._types_cache[filename] = tags_from_path(filename)\n return ret\n\n def by_types(self, names, types, exclude_types):\n types, exclude_types = frozenset(types), frozenset(exclude_types)\n ret = []\n for filename in names:\n tags = self._types_for_file(filename)\n if tags >= types and not tags & exclude_types:\n ret.append(filename)\n return ret\n\n def filenames_for_hook(self, hook):\n names = self.filenames\n names = filter_by_include_exclude(names, hook.files, hook.exclude)\n names = self.by_types(names, hook.types, hook.exclude_types)\n return names\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return {skip.strip() for skip in skips.split(',') if skip.strip()}\n\n\ndef _hook_msg_start(hook, verbose):\n return '{}{}'.format('[{}] '.format(hook.id) if verbose else '', hook.name)\n\n\nSKIPPED = 
'Skipped'\nNO_FILES = '(no files to check)'\n\n\ndef _run_single_hook(classifier, hook, args, skips, cols, use_color):\n filenames = classifier.filenames_for_hook(hook)\n\n if hook.language == 'pcre':\n logger.warning(\n '`{}` (from {}) uses the deprecated pcre language.\\n'\n 'The pcre language is scheduled for removal in pre-commit 2.x.\\n'\n 'The pygrep language is a more portable (and usually drop-in) '\n 'replacement.'.format(hook.id, hook.src),\n )\n\n if hook.id in skips or hook.alias in skips:\n output.write(\n get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg=SKIPPED,\n end_color=color.YELLOW,\n use_color=args.color,\n cols=cols,\n ),\n )\n return 0\n elif not filenames and not hook.always_run:\n output.write(\n get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix=NO_FILES,\n end_msg=SKIPPED,\n end_color=color.TURQUOISE,\n use_color=args.color,\n cols=cols,\n ),\n )\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n output.write(\n get_hook_message(\n _hook_msg_start(hook, args.verbose), end_len=6, cols=cols,\n ),\n )\n sys.stdout.flush()\n\n diff_before = cmd_output_b('git', 'diff', '--no-ext-diff', retcode=None)\n filenames = tuple(filenames) if hook.pass_filenames else ()\n retcode, out = hook.run(filenames, use_color)\n diff_after = cmd_output_b('git', 'diff', '--no-ext-diff', retcode=None)\n\n file_modifications = diff_before != diff_after\n\n # If the hook makes changes, fail the commit\n if file_modifications:\n retcode = 1\n\n if retcode:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n output.write_line(color.format_color(pass_fail, print_color, args.color))\n\n if (\n (out or file_modifications) and\n (retcode or args.verbose or hook.verbose)\n ):\n output.write_line('hookid: {}\\n'.format(hook.id))\n\n # Print a message if failing due to file modifications\n if file_modifications:\n output.write('Files were modified by this hook.')\n\n if out:\n output.write_line(' Additional output:')\n\n output.write_line()\n\n if out.strip():\n output.write_line(out.strip(), logfile_name=hook.log_file)\n output.write_line()\n\n return retcode\n\n\ndef _compute_cols(hooks, verbose):\n \"\"\"Compute the number of columns to display hook messages. 
The widest\n that will be displayed is in the no files skipped case:\n\n Hook name...(no files to check) Skipped\n\n or in the verbose case\n\n Hook name [hookid]...(no files to check) Skipped\n \"\"\"\n if hooks:\n name_len = max(len(_hook_msg_start(hook, verbose)) for hook in hooks)\n else:\n name_len = 0\n\n cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)\n return max(cols, 80)\n\n\ndef _all_filenames(args):\n if args.origin and args.source:\n return git.get_changed_files(args.origin, args.source)\n elif args.hook_stage in {'prepare-commit-msg', 'commit-msg'}:\n return (args.commit_msg_filename,)\n elif args.files:\n return args.files\n elif args.all_files:\n return git.get_all_files()\n elif git.is_in_merge_conflict():\n return git.get_conflicted_files()\n else:\n return git.get_staged_files()\n\n\ndef _run_hooks(config, hooks, args, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n cols = _compute_cols(hooks, args.verbose)\n filenames = _all_filenames(args)\n filenames = filter_by_include_exclude(filenames, '', config['exclude'])\n classifier = Classifier(filenames)\n retval = 0\n for hook in hooks:\n retval |= _run_single_hook(\n classifier, hook, args, skips, cols, args.color,\n )\n if retval and config['fail_fast']:\n break\n if retval and args.show_diff_on_failure and git.has_diff():\n if args.all_files:\n output.write_line(\n 'pre-commit hook(s) made changes.\\n'\n 'If you are seeing this message in CI, '\n 'reproduce locally with: `pre-commit run --all-files`.\\n'\n 'To run `pre-commit` as part of git workflow, use '\n '`pre-commit install`.',\n )\n output.write_line('All changes made by hooks:')\n # args.color is a boolean.\n # See user_color function in color.py\n subprocess.call((\n 'git', '--no-pager', 'diff', '--no-ext-diff',\n '--color={}'.format({True: 'always', False: 'never'}[args.color]),\n ))\n\n return retval\n\n\ndef _has_unmerged_paths():\n _, stdout, _ = cmd_output_b('git', 'ls-files', '--unmerged')\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(config_file):\n retcode, _, _ = cmd_output_b(\n 'git', 'diff', '--no-ext-diff', '--exit-code', config_file,\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(config_file, store, args, environ=os.environ):\n no_stash = args.all_files or bool(args.files)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths():\n logger.error('Unmerged files. 
Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(config_file) and not no_stash:\n logger.error(\n 'Your pre-commit configuration is unstaged.\\n'\n '`git add {}` to fix this.'.format(config_file),\n )\n return 1\n\n # Expose origin / source as environment variables for hooks to consume\n if args.origin and args.source:\n environ['PRE_COMMIT_ORIGIN'] = args.origin\n environ['PRE_COMMIT_SOURCE'] = args.source\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(store.directory)\n\n with ctx:\n config = load_config(config_file)\n hooks = [\n hook\n for hook in all_hooks(config, store)\n if not args.hook or hook.id == args.hook or hook.alias == args.hook\n if args.hook_stage in hook.stages\n ]\n\n if args.hook and not hooks:\n output.write_line(\n 'No hook with id `{}` in stage `{}`'.format(\n args.hook, args.hook_stage,\n ),\n )\n return 1\n\n install_hook_envs(hooks, store)\n\n return _run_hooks(config, hooks, args, environ)\n", "path": "pre_commit/commands/run.py"}]} | 3,622 | 175 |
gh_patches_debug_13538 | rasdani/github-patches | git_diff | fossasia__open-event-server-4418 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[sentry] KeyError/v1/users/<int:user_id>/access-codes
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-orga-server
```
KeyError: 'id'
File "flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "flask_cors/extension.py", line 161, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "flask_rest_jsonapi/decorators.py", line 32, in wrapper
return func(*args, **kwargs)
File "flask/views.py", line 84, in view
return self.dispatch_request(*args, **kwargs)
File "flask_rest_jsonapi/resource.py", line 68, in dispatch_request
response = method(*args, **kwargs)
File "flask_rest_jsonapi/decorators.py", line 56, in wrapper
return func(*args, **kwargs)
File "flask_rest_jsonapi/resource.py", line 145, in get
objects_count, objects = self._data_layer.get_collection(qs, kwargs)
File "flask_rest_jsonapi/data_layers/alchemy.py", line 92, in get_collection
query = self.query(view_kwargs)
File "app/api/access_codes.py", line 56, in query
if not has_access('is_user_itself', user_id=user.id):
File "app/api/helpers/permission_manager.py", line 450, in has_access
auth = permissions[access_level](lambda *a, **b: True, (), {}, (), **kwargs)
File "app/api/helpers/permissions.py", line 43, in decorator
return fn(*args, **kwargs)
File "app/api/helpers/permission_manager.py", line 109, in is_user_itself
if not user.is_admin and not user.is_super_admin and user.id != kwargs['id']:
```
</issue>
<code>
[start of app/api/access_codes.py]
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2 from flask_rest_jsonapi.exceptions import ObjectNotFound
3 from app.api.helpers.exceptions import ForbiddenException
4
5 from app.api.bootstrap import api
6 from app.api.helpers.db import safe_query
7 from app.api.helpers.permission_manager import has_access
8 from app.api.helpers.permissions import jwt_required
9 from app.api.helpers.query import event_query
10 from app.api.helpers.utilities import require_relationship
11 from app.api.schema.access_codes import AccessCodeSchema
12 from app.models import db
13 from app.models.access_code import AccessCode
14 from app.models.ticket import Ticket
15 from app.models.user import User
16
17
18 class AccessCodeListPost(ResourceList):
19 """
20 Create AccessCodes
21 """
22 def before_post(self, args, kwargs, data):
23 """
24 before post method to check for required relationships and permissions
25 :param args:
26 :param kwargs:
27 :param data:
28 :return:
29 """
30 require_relationship(['event', 'user'], data)
31 if not has_access('is_coorganizer', event_id=data['event']):
32 raise ObjectNotFound({'parameter': 'event_id'},
33 "Event: {} not found".format(data['event']))
34
35 schema = AccessCodeSchema
36 methods = ['POST', ]
37 data_layer = {'session': db.session,
38 'model': AccessCode
39 }
40
41
42 class AccessCodeList(ResourceList):
43 """
44 List AccessCodes
45 """
46 def query(self, view_kwargs):
47 """
48 Method to get access codes list based on different view_kwargs
49 :param view_kwargs:
50 :return:
51 """
52 query_ = self.session.query(AccessCode)
53 query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer')
54 if view_kwargs.get('user_id'):
55 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
56 if not has_access('is_user_itself', user_id=user.id):
57 raise ForbiddenException({'source': ''}, 'Access Forbidden')
58 query_ = query_.join(User).filter(User.id == user.id)
59 if view_kwargs.get('ticket_id'):
60 ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')
61 if not has_access('is_coorganizer', event_id=ticket.event_id):
62 raise ForbiddenException({'source': ''}, 'Access Forbidden')
63 # access_code - ticket :: many-to-many relationship
64 query_ = AccessCode.query.filter(AccessCode.tickets.any(id=ticket.id))
65 query_
66 return query_
67
68 view_kwargs = True
69 methods = ['GET', ]
70 schema = AccessCodeSchema
71 data_layer = {'session': db.session,
72 'model': AccessCode,
73 'methods': {
74 'query': query,
75 }}
76
77
78 class AccessCodeDetail(ResourceDetail):
79 """
80 AccessCode detail by id
81 """
82
83 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
84 fetch_as="event_id", model=AccessCode, methods="GET, PATCH"),
85 api.has_permission('is_coorganizer_but_not_admin', fetch='event_id',
86 fetch_as="event_id", model=AccessCode, methods="DELETE"),)
87 schema = AccessCodeSchema
88 data_layer = {'session': db.session,
89 'model': AccessCode
90 }
91
92
93 class AccessCodeRelationshipRequired(ResourceRelationship):
94 """
95 AccessCode Relationship Required
96 """
97 decorators = (jwt_required,)
98 methods = ['GET', 'PATCH']
99 schema = AccessCodeSchema
100 data_layer = {'session': db.session,
101 'model': AccessCode}
102
103
104 class AccessCodeRelationshipOptional(ResourceRelationship):
105 """
106 AccessCode Relationship Optional
107 """
108 decorators = (jwt_required,)
109 schema = AccessCodeSchema
110 data_layer = {'session': db.session,
111 'model': AccessCode}
112
[end of app/api/access_codes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/access_codes.py b/app/api/access_codes.py
--- a/app/api/access_codes.py
+++ b/app/api/access_codes.py
@@ -53,7 +53,7 @@
query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer')
if view_kwargs.get('user_id'):
user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
- if not has_access('is_user_itself', user_id=user.id):
+ if not has_access('is_user_itself', id=user.id):
raise ForbiddenException({'source': ''}, 'Access Forbidden')
query_ = query_.join(User).filter(User.id == user.id)
if view_kwargs.get('ticket_id'):
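The one-line hunk matches the traceback: `is_user_itself` looks up `kwargs['id']`, but the call site in `AccessCodeList.query` passed the value as `user_id=`, so the lookup raised `KeyError: 'id'`. A stripped-down reproduction of that mismatch, using a stand-in for the permission helper rather than the project's real decorator chain:

```
def is_user_itself(view, view_args, view_kwargs, *args, **kwargs):
    # stand-in for the real check in app/api/helpers/permission_manager.py,
    # which compares the logged-in user's id against kwargs['id']
    return kwargs['id'] == 42

# before the patch: the keyword is named user_id, so kwargs has no 'id' key
try:
    is_user_itself(None, (), {}, (), user_id=42)
except KeyError as exc:
    print('KeyError:', exc)

# after the patch: the keyword is named id and the lookup succeeds
print(is_user_itself(None, (), {}, (), id=42))
```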
| {"golden_diff": "diff --git a/app/api/access_codes.py b/app/api/access_codes.py\n--- a/app/api/access_codes.py\n+++ b/app/api/access_codes.py\n@@ -53,7 +53,7 @@\n query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer')\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n- if not has_access('is_user_itself', user_id=user.id):\n+ if not has_access('is_user_itself', id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User).filter(User.id == user.id)\n if view_kwargs.get('ticket_id'):\n", "issue": "[sentry] KeyError/v1/users/<int:user_id>/access-codes\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\n```\r\nKeyError: 'id'\r\n File \"flask/app.py\", line 1982, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"flask/app.py\", line 1614, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"flask_cors/extension.py\", line 161, in wrapped_function\r\n return cors_after_request(app.make_response(f(*args, **kwargs)))\r\n File \"flask/app.py\", line 1517, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"flask/app.py\", line 1612, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"flask/app.py\", line 1598, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"flask_rest_jsonapi/decorators.py\", line 32, in wrapper\r\n return func(*args, **kwargs)\r\n File \"flask/views.py\", line 84, in view\r\n return self.dispatch_request(*args, **kwargs)\r\n File \"flask_rest_jsonapi/resource.py\", line 68, in dispatch_request\r\n response = method(*args, **kwargs)\r\n File \"flask_rest_jsonapi/decorators.py\", line 56, in wrapper\r\n return func(*args, **kwargs)\r\n File \"flask_rest_jsonapi/resource.py\", line 145, in get\r\n objects_count, objects = self._data_layer.get_collection(qs, kwargs)\r\n File \"flask_rest_jsonapi/data_layers/alchemy.py\", line 92, in get_collection\r\n query = self.query(view_kwargs)\r\n File \"app/api/access_codes.py\", line 56, in query\r\n if not has_access('is_user_itself', user_id=user.id):\r\n File \"app/api/helpers/permission_manager.py\", line 450, in has_access\r\n auth = permissions[access_level](lambda *a, **b: True, (), {}, (), **kwargs)\r\n File \"app/api/helpers/permissions.py\", line 43, in decorator\r\n return fn(*args, **kwargs)\r\n File \"app/api/helpers/permission_manager.py\", line 109, in is_user_itself\r\n if not user.is_admin and not user.is_super_admin and user.id != kwargs['id']:\r\n```\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom app.api.helpers.exceptions import ForbiddenException\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.access_codes import AccessCodeSchema\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.ticket import Ticket\nfrom app.models.user import 
User\n\n\nclass AccessCodeListPost(ResourceList):\n \"\"\"\n Create AccessCodes\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationships and permissions\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'user'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event']))\n\n schema = AccessCodeSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': AccessCode\n }\n\n\nclass AccessCodeList(ResourceList):\n \"\"\"\n List AccessCodes\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n Method to get access codes list based on different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(AccessCode)\n query_ = event_query(self, query_, view_kwargs, permission='is_coorganizer')\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', user_id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User).filter(User.id == user.id)\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_coorganizer', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n # access_code - ticket :: many-to-many relationship\n query_ = AccessCode.query.filter(AccessCode.tickets.any(id=ticket.id))\n query_\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode,\n 'methods': {\n 'query': query,\n }}\n\n\nclass AccessCodeDetail(ResourceDetail):\n \"\"\"\n AccessCode detail by id\n \"\"\"\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=AccessCode, methods=\"GET, PATCH\"),\n api.has_permission('is_coorganizer_but_not_admin', fetch='event_id',\n fetch_as=\"event_id\", model=AccessCode, methods=\"DELETE\"),)\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode\n }\n\n\nclass AccessCodeRelationshipRequired(ResourceRelationship):\n \"\"\"\n AccessCode Relationship Required\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode}\n\n\nclass AccessCodeRelationshipOptional(ResourceRelationship):\n \"\"\"\n AccessCode Relationship Optional\n \"\"\"\n decorators = (jwt_required,)\n schema = AccessCodeSchema\n data_layer = {'session': db.session,\n 'model': AccessCode}\n", "path": "app/api/access_codes.py"}]} | 2,193 | 166 |
gh_patches_debug_13827 | rasdani/github-patches | git_diff | internetarchive__openlibrary-7931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search results page fails to render if search string includes special characters
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
### Evidence / Screenshot (if possible)
<img width="1050" alt="Screen Shot 2023-03-08 at 2 56 35 PM" src="https://user-images.githubusercontent.com/82178/223893720-68b534ce-5bf8-4b2c-b601-92ef76a4a2d4.png">
### Relevant url?
https://openlibrary.org/search?q=Cornell+%2777%3A+The+Music%2C+the+Myth+and+the+Magnificence+of+the+Grateful+Dead+Show+at+Barton+Hall&mode=everything
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
1. Go to ...
2. Do ...
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual:
* Expected:
### Details
- **Logged in (Y/N)?**
- **Browser type/version?**
- **Operating system?**
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
</issue>
<code>
[start of openlibrary/solr/query_utils.py]
1 from typing import Callable, Literal, Optional
2 from luqum.parser import parser
3 from luqum.tree import Item, SearchField, BaseOperation, Group, Word, Unary
4 import re
5
6
7 class EmptyTreeError(Exception):
8 pass
9
10
11 def luqum_remove_child(child: Item, parents: list[Item]):
12 """
13 Removes a child from a luqum parse tree. If the tree
14 ends up being empty, errors.
15
16 :param child: Node to remove
17 :param parents: Path of parent nodes leading from the root of the tree
18 """
19 parent = parents[-1] if parents else None
20 if parent is None:
21 # We cannot remove the element if it is the root of the tree
22 raise EmptyTreeError()
23 elif isinstance(parent, (BaseOperation, Group, Unary)):
24 new_children = tuple(c for c in parent.children if c != child)
25 if not new_children:
26 # If we have deleted all the children, we need to delete the parent
27 # as well. And potentially recurse up the tree.
28 luqum_remove_child(parent, parents[:-1])
29 else:
30 parent.children = new_children
31 else:
32 raise ValueError("Not supported for generic class Item")
33
34
35 def luqum_replace_child(parent: Item, old_child: Item, new_child: Item):
36 """
37 Replaces a child in a luqum parse tree.
38 """
39 if isinstance(parent, (BaseOperation, Group, Unary)):
40 new_children = tuple(
41 new_child if c == old_child else c for c in parent.children
42 )
43 parent.children = new_children
44 else:
45 raise ValueError("Not supported for generic class Item")
46
47
48 def luqum_traverse(item: Item, _parents: list[Item] | None = None):
49 """
50 Traverses every node in the parse tree in depth-first order.
51
52 Does not make any guarantees about what will happen if you
53 modify the tree while traversing it 😅 But we do it anyways.
54
55 :param item: Node to traverse
56 :param _parents: Internal parameter for tracking parents
57 """
58 parents = _parents or []
59 yield item, parents
60 new_parents = [*parents, item]
61 for child in item.children:
62 yield from luqum_traverse(child, new_parents)
63
64
65 def escape_unknown_fields(
66 query: str,
67 is_valid_field: Callable[[str], bool],
68 lower=True,
69 ) -> str:
70 """
71 Escapes the colon of any search field that is not deemed valid by the
72 predicate function `is_valid_field`.
73
74 :param query: Query to escape
75 :param is_valid_field: Predicate function that determines if a field is valid
76 :param lower: If true, the field will be lowercased before being checked
77
78 >>> escape_unknown_fields('title:foo', lambda field: False)
79 'title\\\\:foo'
80 >>> escape_unknown_fields('title:foo bar blah:bar baz:boo', lambda field: False)
81 'title\\\\:foo bar blah\\\\:bar baz\\\\:boo'
82 >>> escape_unknown_fields('title:foo bar', {'title'}.__contains__)
83 'title:foo bar'
84 >>> escape_unknown_fields('title:foo bar baz:boo', {'title'}.__contains__)
85 'title:foo bar baz\\\\:boo'
86 >>> escape_unknown_fields('title:foo bar baz:boo', {'TITLE'}.__contains__, lower=False)
87 'title\\\\:foo bar baz\\\\:boo'
88 >>> escape_unknown_fields('hi', {'title'}.__contains__)
89 'hi'
90 >>> escape_unknown_fields('(title:foo) OR (blah:bah)', {'title'}.__contains__)
91 '(title:foo) OR (blah\\\\:bah)'
92 """
93 tree = parser.parse(query)
94 # Note we use the string of the tree, because it strips spaces
95 # like: "title : foo" -> "title:foo"
96 escaped_query = str(tree)
97 offset = 0
98 for sf, _ in luqum_traverse(tree):
99 if isinstance(sf, SearchField) and not is_valid_field(
100 sf.name.lower() if lower else sf.name
101 ):
102 field = sf.name + r'\:'
103 if hasattr(sf, 'head'):
104 # head and tail are used for whitespace between fields;
105 # copy it along to the write space to avoid things smashing
106 # together
107 field = sf.head + field
108
109 # We will be moving left to right, so we need to adjust the offset
110 # to account for the characters we have already replaced
111 escaped_query = (
112 escaped_query[: sf.pos + offset]
113 + field
114 + escaped_query[sf.pos + len(field) - 1 + offset :]
115 )
116 offset += 1
117 return escaped_query
118
119
120 def fully_escape_query(query: str) -> str:
121 """
122 Try to convert a query to basically a plain lucene string.
123
124 >>> fully_escape_query('title:foo')
125 'title\\\\:foo'
126 >>> fully_escape_query('title:foo bar')
127 'title\\\\:foo bar'
128 >>> fully_escape_query('title:foo (bar baz:boo)')
129 'title\\\\:foo \\\\(bar baz\\\\:boo\\\\)'
130 >>> fully_escape_query('x:[A TO Z}')
131 'x\\\\:\\\\[A TO Z\\\\}'
132 >>> fully_escape_query('foo AND bar')
133 'foo and bar'
134 """
135 escaped = query
136 # Escape special characters
137 escaped = re.sub(r'[\[\]\(\)\{\}:"\-+?~^/\\,]', r'\\\g<0>', escaped)
138 # Remove boolean operators by making them lowercase
139 escaped = re.sub(r'AND|OR|NOT', lambda _1: _1.group(0).lower(), escaped)
140 return escaped
141
142
143 def luqum_parser(query: str) -> Item:
144 """
145 Parses a lucene-like query, with the special binding rules of Open Library.
146
147 In our queries, unlike native solr/lucene, field names are greedy, and
148 affect the rest of the query until another field is hit.
149
150 Here are some examples. The first query is the native solr/lucene
151 parsing. The second is the parsing we want.
152
153 Query : title:foo bar
154 Lucene: (title:foo) bar
155 OL : (title:foo bar)
156
157 Query : title:foo OR bar AND author:blah
158 Lucene: (title:foo) OR (bar) AND (author:blah)
159 OL : (title:foo OR bar) AND (author:blah)
160
161 This requires an annoying amount of manipulation of the default
162 Luqum parser, unfortunately.
163
164 Also, OL queries allow spaces after fields.
165 """
166 tree = parser.parse(query)
167
168 def find_next_word(item: Item) -> Optional[tuple[Word, Optional[BaseOperation]]]:
169 if isinstance(item, Word):
170 return item, None
171 elif isinstance(item, BaseOperation) and isinstance(item.children[0], Word):
172 return item.children[0], item
173 else:
174 return None
175
176 for node, parents in luqum_traverse(tree):
177 if isinstance(node, BaseOperation):
178 # if any of the children are SearchField followed by one or more words,
179 # we bundle them together
180 last_sf: SearchField = None
181 to_rem = []
182 for child in node.children:
183 if isinstance(child, SearchField) and isinstance(child.expr, Word):
184 last_sf = child
185 elif last_sf and (next_word := find_next_word(child)):
186 word, parent_op = next_word
187 # Add it over
188 if not isinstance(last_sf.expr, Group):
189 last_sf.expr = Group(type(node)(last_sf.expr, word))
190 last_sf.expr.tail = word.tail
191 word.tail = ''
192 else:
193 last_sf.expr.expr.children[-1].tail = last_sf.expr.tail
194 last_sf.expr.expr.children += (word,)
195 last_sf.expr.tail = word.tail
196 word.tail = ''
197 if parent_op:
198 # A query like: 'title:foo blah OR author:bar
199 # Lucene parses as: (title:foo) ? (blah OR author:bar)
200 # We want : (title:foo ? blah) OR (author:bar)
201 node.op = parent_op.op
202 node.children += (*parent_op.children[1:],)
203 to_rem.append(child)
204 else:
205 last_sf = None
206 if len(to_rem) == len(node.children) - 1:
207 # We only have the searchfield left!
208 if parents:
209 # Move the head to the next element
210 last_sf.head = node.head
211 parents[-1].children = tuple(
212 child if child is not node else last_sf
213 for child in parents[-1].children
214 )
215 else:
216 tree = last_sf
217 break
218 else:
219 node.children = tuple(
220 child for child in node.children if child not in to_rem
221 )
222
223 # Remove spaces before field names
224 for node, parents in luqum_traverse(tree):
225 if isinstance(node, SearchField):
226 node.expr.head = ''
227
228 return tree
229
230
231 def query_dict_to_str(
232 escaped: dict | None = None,
233 unescaped: dict | None = None,
234 op: Literal['AND', 'OR', ''] = '',
235 phrase: bool = False,
236 ) -> str:
237 """
238 Converts a query dict to a search query.
239
240 >>> query_dict_to_str({'title': 'foo'})
241 'title:(foo)'
242 >>> query_dict_to_str({'title': 'foo bar', 'author': 'bar'})
243 'title:(foo bar) author:(bar)'
244 >>> query_dict_to_str({'title': 'foo bar', 'author': 'bar'}, op='OR')
245 'title:(foo bar) OR author:(bar)'
246 >>> query_dict_to_str({'title': 'foo ? to escape'})
247 'title:(foo \\\\? to escape)'
248 >>> query_dict_to_str({'title': 'YES AND'})
249 'title:(YES and)'
250 >>> query_dict_to_str({'publisher_facet': 'Running Press'}, phrase=True)
251 'publisher_facet:"Running Press"'
252 """
253 result = ''
254 if escaped:
255 result += f' {op} '.join(
256 f'{k}:"{fully_escape_query(v)}"'
257 if phrase
258 else f'{k}:({fully_escape_query(v)})'
259 for k, v in escaped.items()
260 )
261 if unescaped:
262 if result:
263 result += f' {op} '
264 result += f' {op} '.join(f'{k}:{v}' for k, v in unescaped.items())
265 return result
266
[end of openlibrary/solr/query_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openlibrary/solr/query_utils.py b/openlibrary/solr/query_utils.py
--- a/openlibrary/solr/query_utils.py
+++ b/openlibrary/solr/query_utils.py
@@ -131,10 +131,12 @@
'x\\\\:\\\\[A TO Z\\\\}'
>>> fully_escape_query('foo AND bar')
'foo and bar'
+ >>> fully_escape_query("foo's bar")
+ "foo\\\\'s bar"
"""
escaped = query
# Escape special characters
- escaped = re.sub(r'[\[\]\(\)\{\}:"\-+?~^/\\,]', r'\\\g<0>', escaped)
+ escaped = re.sub(r'[\[\]\(\)\{\}:"\-+?~^/\\,\']', r'\\\g<0>', escaped)
# Remove boolean operators by making them lowercase
escaped = re.sub(r'AND|OR|NOT', lambda _1: _1.group(0).lower(), escaped)
return escaped
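The functional change is just the apostrophe added to the escaped character class in `fully_escape_query`, and the apostrophe is exactly the special character in the failing query from the report (`Cornell '77: ...`). A condensed before/after of that regex, with the character classes copied from the hunk and a shortened query string:

```
import re

query = "Cornell '77: The Music"

def escape(q, char_class):
    # condensed version of fully_escape_query: escape specials, lowercase boolean operators
    q = re.sub(char_class, r'\\\g<0>', q)
    return re.sub(r'AND|OR|NOT', lambda m: m.group(0).lower(), q)

# character class before the patch: the apostrophe slips through unescaped
print(escape(query, r'[\[\]\(\)\{\}:"\-+?~^/\\,]'))    # Cornell '77\: The Music

# character class after the patch: the apostrophe is escaped as well
print(escape(query, r'[\[\]\(\)\{\}:"\-+?~^/\\,\']'))  # Cornell \'77\: The Music
```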
| {"golden_diff": "diff --git a/openlibrary/solr/query_utils.py b/openlibrary/solr/query_utils.py\n--- a/openlibrary/solr/query_utils.py\n+++ b/openlibrary/solr/query_utils.py\n@@ -131,10 +131,12 @@\n 'x\\\\\\\\:\\\\\\\\[A TO Z\\\\\\\\}'\n >>> fully_escape_query('foo AND bar')\n 'foo and bar'\n+ >>> fully_escape_query(\"foo's bar\")\n+ \"foo\\\\\\\\'s bar\"\n \"\"\"\n escaped = query\n # Escape special characters\n- escaped = re.sub(r'[\\[\\]\\(\\)\\{\\}:\"\\-+?~^/\\\\,]', r'\\\\\\g<0>', escaped)\n+ escaped = re.sub(r'[\\[\\]\\(\\)\\{\\}:\"\\-+?~^/\\\\,\\']', r'\\\\\\g<0>', escaped)\n # Remove boolean operators by making them lowercase\n escaped = re.sub(r'AND|OR|NOT', lambda _1: _1.group(0).lower(), escaped)\n return escaped\n", "issue": "Search results page fails to render if search string includes special characters\n<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->\r\n\r\n### Evidence / Screenshot (if possible)\r\n<img width=\"1050\" alt=\"Screen Shot 2023-03-08 at 2 56 35 PM\" src=\"https://user-images.githubusercontent.com/82178/223893720-68b534ce-5bf8-4b2c-b601-92ef76a4a2d4.png\">\r\n\r\n### Relevant url?\r\n\r\nhttps://openlibrary.org/search?q=Cornell+%2777%3A+The+Music%2C+the+Myth+and+the+Magnificence+of+the+Grateful+Dead+Show+at+Barton+Hall&mode=everything\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\n1. Go to ...\r\n2. Do ...\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual:\r\n* Expected:\r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?**\r\n- **Browser type/version?**\r\n- **Operating system?**\r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\n", "before_files": [{"content": "from typing import Callable, Literal, Optional\nfrom luqum.parser import parser\nfrom luqum.tree import Item, SearchField, BaseOperation, Group, Word, Unary\nimport re\n\n\nclass EmptyTreeError(Exception):\n pass\n\n\ndef luqum_remove_child(child: Item, parents: list[Item]):\n \"\"\"\n Removes a child from a luqum parse tree. If the tree\n ends up being empty, errors.\n\n :param child: Node to remove\n :param parents: Path of parent nodes leading from the root of the tree\n \"\"\"\n parent = parents[-1] if parents else None\n if parent is None:\n # We cannot remove the element if it is the root of the tree\n raise EmptyTreeError()\n elif isinstance(parent, (BaseOperation, Group, Unary)):\n new_children = tuple(c for c in parent.children if c != child)\n if not new_children:\n # If we have deleted all the children, we need to delete the parent\n # as well. 
And potentially recurse up the tree.\n luqum_remove_child(parent, parents[:-1])\n else:\n parent.children = new_children\n else:\n raise ValueError(\"Not supported for generic class Item\")\n\n\ndef luqum_replace_child(parent: Item, old_child: Item, new_child: Item):\n \"\"\"\n Replaces a child in a luqum parse tree.\n \"\"\"\n if isinstance(parent, (BaseOperation, Group, Unary)):\n new_children = tuple(\n new_child if c == old_child else c for c in parent.children\n )\n parent.children = new_children\n else:\n raise ValueError(\"Not supported for generic class Item\")\n\n\ndef luqum_traverse(item: Item, _parents: list[Item] | None = None):\n \"\"\"\n Traverses every node in the parse tree in depth-first order.\n\n Does not make any guarantees about what will happen if you\n modify the tree while traversing it \ud83d\ude05 But we do it anyways.\n\n :param item: Node to traverse\n :param _parents: Internal parameter for tracking parents\n \"\"\"\n parents = _parents or []\n yield item, parents\n new_parents = [*parents, item]\n for child in item.children:\n yield from luqum_traverse(child, new_parents)\n\n\ndef escape_unknown_fields(\n query: str,\n is_valid_field: Callable[[str], bool],\n lower=True,\n) -> str:\n \"\"\"\n Escapes the colon of any search field that is not deemed valid by the\n predicate function `is_valid_field`.\n\n :param query: Query to escape\n :param is_valid_field: Predicate function that determines if a field is valid\n :param lower: If true, the field will be lowercased before being checked\n\n >>> escape_unknown_fields('title:foo', lambda field: False)\n 'title\\\\\\\\:foo'\n >>> escape_unknown_fields('title:foo bar blah:bar baz:boo', lambda field: False)\n 'title\\\\\\\\:foo bar blah\\\\\\\\:bar baz\\\\\\\\:boo'\n >>> escape_unknown_fields('title:foo bar', {'title'}.__contains__)\n 'title:foo bar'\n >>> escape_unknown_fields('title:foo bar baz:boo', {'title'}.__contains__)\n 'title:foo bar baz\\\\\\\\:boo'\n >>> escape_unknown_fields('title:foo bar baz:boo', {'TITLE'}.__contains__, lower=False)\n 'title\\\\\\\\:foo bar baz\\\\\\\\:boo'\n >>> escape_unknown_fields('hi', {'title'}.__contains__)\n 'hi'\n >>> escape_unknown_fields('(title:foo) OR (blah:bah)', {'title'}.__contains__)\n '(title:foo) OR (blah\\\\\\\\:bah)'\n \"\"\"\n tree = parser.parse(query)\n # Note we use the string of the tree, because it strips spaces\n # like: \"title : foo\" -> \"title:foo\"\n escaped_query = str(tree)\n offset = 0\n for sf, _ in luqum_traverse(tree):\n if isinstance(sf, SearchField) and not is_valid_field(\n sf.name.lower() if lower else sf.name\n ):\n field = sf.name + r'\\:'\n if hasattr(sf, 'head'):\n # head and tail are used for whitespace between fields;\n # copy it along to the write space to avoid things smashing\n # together\n field = sf.head + field\n\n # We will be moving left to right, so we need to adjust the offset\n # to account for the characters we have already replaced\n escaped_query = (\n escaped_query[: sf.pos + offset]\n + field\n + escaped_query[sf.pos + len(field) - 1 + offset :]\n )\n offset += 1\n return escaped_query\n\n\ndef fully_escape_query(query: str) -> str:\n \"\"\"\n Try to convert a query to basically a plain lucene string.\n\n >>> fully_escape_query('title:foo')\n 'title\\\\\\\\:foo'\n >>> fully_escape_query('title:foo bar')\n 'title\\\\\\\\:foo bar'\n >>> fully_escape_query('title:foo (bar baz:boo)')\n 'title\\\\\\\\:foo \\\\\\\\(bar baz\\\\\\\\:boo\\\\\\\\)'\n >>> fully_escape_query('x:[A TO Z}')\n 'x\\\\\\\\:\\\\\\\\[A TO Z\\\\\\\\}'\n >>> 
fully_escape_query('foo AND bar')\n 'foo and bar'\n \"\"\"\n escaped = query\n # Escape special characters\n escaped = re.sub(r'[\\[\\]\\(\\)\\{\\}:\"\\-+?~^/\\\\,]', r'\\\\\\g<0>', escaped)\n # Remove boolean operators by making them lowercase\n escaped = re.sub(r'AND|OR|NOT', lambda _1: _1.group(0).lower(), escaped)\n return escaped\n\n\ndef luqum_parser(query: str) -> Item:\n \"\"\"\n Parses a lucene-like query, with the special binding rules of Open Library.\n\n In our queries, unlike native solr/lucene, field names are greedy, and\n affect the rest of the query until another field is hit.\n\n Here are some examples. The first query is the native solr/lucene\n parsing. The second is the parsing we want.\n\n Query : title:foo bar\n Lucene: (title:foo) bar\n OL : (title:foo bar)\n\n Query : title:foo OR bar AND author:blah\n Lucene: (title:foo) OR (bar) AND (author:blah)\n OL : (title:foo OR bar) AND (author:blah)\n\n This requires an annoying amount of manipulation of the default\n Luqum parser, unfortunately.\n\n Also, OL queries allow spaces after fields.\n \"\"\"\n tree = parser.parse(query)\n\n def find_next_word(item: Item) -> Optional[tuple[Word, Optional[BaseOperation]]]:\n if isinstance(item, Word):\n return item, None\n elif isinstance(item, BaseOperation) and isinstance(item.children[0], Word):\n return item.children[0], item\n else:\n return None\n\n for node, parents in luqum_traverse(tree):\n if isinstance(node, BaseOperation):\n # if any of the children are SearchField followed by one or more words,\n # we bundle them together\n last_sf: SearchField = None\n to_rem = []\n for child in node.children:\n if isinstance(child, SearchField) and isinstance(child.expr, Word):\n last_sf = child\n elif last_sf and (next_word := find_next_word(child)):\n word, parent_op = next_word\n # Add it over\n if not isinstance(last_sf.expr, Group):\n last_sf.expr = Group(type(node)(last_sf.expr, word))\n last_sf.expr.tail = word.tail\n word.tail = ''\n else:\n last_sf.expr.expr.children[-1].tail = last_sf.expr.tail\n last_sf.expr.expr.children += (word,)\n last_sf.expr.tail = word.tail\n word.tail = ''\n if parent_op:\n # A query like: 'title:foo blah OR author:bar\n # Lucene parses as: (title:foo) ? (blah OR author:bar)\n # We want : (title:foo ? blah) OR (author:bar)\n node.op = parent_op.op\n node.children += (*parent_op.children[1:],)\n to_rem.append(child)\n else:\n last_sf = None\n if len(to_rem) == len(node.children) - 1:\n # We only have the searchfield left!\n if parents:\n # Move the head to the next element\n last_sf.head = node.head\n parents[-1].children = tuple(\n child if child is not node else last_sf\n for child in parents[-1].children\n )\n else:\n tree = last_sf\n break\n else:\n node.children = tuple(\n child for child in node.children if child not in to_rem\n )\n\n # Remove spaces before field names\n for node, parents in luqum_traverse(tree):\n if isinstance(node, SearchField):\n node.expr.head = ''\n\n return tree\n\n\ndef query_dict_to_str(\n escaped: dict | None = None,\n unescaped: dict | None = None,\n op: Literal['AND', 'OR', ''] = '',\n phrase: bool = False,\n) -> str:\n \"\"\"\n Converts a query dict to a search query.\n\n >>> query_dict_to_str({'title': 'foo'})\n 'title:(foo)'\n >>> query_dict_to_str({'title': 'foo bar', 'author': 'bar'})\n 'title:(foo bar) author:(bar)'\n >>> query_dict_to_str({'title': 'foo bar', 'author': 'bar'}, op='OR')\n 'title:(foo bar) OR author:(bar)'\n >>> query_dict_to_str({'title': 'foo ? to escape'})\n 'title:(foo \\\\\\\\? 
to escape)'\n >>> query_dict_to_str({'title': 'YES AND'})\n 'title:(YES and)'\n >>> query_dict_to_str({'publisher_facet': 'Running Press'}, phrase=True)\n 'publisher_facet:\"Running Press\"'\n \"\"\"\n result = ''\n if escaped:\n result += f' {op} '.join(\n f'{k}:\"{fully_escape_query(v)}\"'\n if phrase\n else f'{k}:({fully_escape_query(v)})'\n for k, v in escaped.items()\n )\n if unescaped:\n if result:\n result += f' {op} '\n result += f' {op} '.join(f'{k}:{v}' for k, v in unescaped.items())\n return result\n", "path": "openlibrary/solr/query_utils.py"}]} | 3,931 | 231 |
gh_patches_debug_31495 | rasdani/github-patches | git_diff | modin-project__modin-2784 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ASV] add read_csv benchmark with dtype/names/parse_dates parameters
</issue>
<code>
[start of asv_bench/benchmarks/io/csv.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 import modin.pandas as pd
15 import numpy as np
16
17 from ..utils import (
18 generate_dataframe,
19 RAND_LOW,
20 RAND_HIGH,
21 ASV_USE_IMPL,
22 ASV_DATASET_SIZE,
23 UNARY_OP_DATA_SIZE,
24 IMPL,
25 execute,
26 get_shape_id,
27 )
28
29 # ray init
30 if ASV_USE_IMPL == "modin":
31 pd.DataFrame([])
32
33
34 class BaseReadCsv:
35 # test data file can be created only once
36 def setup_cache(self, test_filename="io_test_file"):
37 test_filenames = {}
38 for shape in UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]:
39 shape_id = get_shape_id(shape)
40 test_filenames[shape_id] = f"{test_filename}_{shape_id}.csv"
41 df = generate_dataframe("pandas", "str_int", *shape, RAND_LOW, RAND_HIGH)
42 df.to_csv(test_filenames[shape_id], index=False)
43
44 return test_filenames
45
46 def setup(self, test_filenames, shape, *args, **kwargs):
47 self.shape_id = get_shape_id(shape)
48
49
50 class TimeReadCsvSkiprows(BaseReadCsv):
51 param_names = ["shape", "skiprows"]
52 params = [
53 UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],
54 [
55 None,
56 lambda x: x % 2,
57 np.arange(1, UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE][0][0] // 10),
58 np.arange(1, UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE][0][0], 2),
59 ],
60 ]
61
62 def time_skiprows(self, test_filenames, shape, skiprows):
63 execute(
64 IMPL[ASV_USE_IMPL].read_csv(
65 test_filenames[self.shape_id], skiprows=skiprows
66 )
67 )
68
[end of asv_bench/benchmarks/io/csv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py
--- a/asv_bench/benchmarks/io/csv.py
+++ b/asv_bench/benchmarks/io/csv.py
@@ -65,3 +65,70 @@
test_filenames[self.shape_id], skiprows=skiprows
)
)
+
+
+class TimeReadCsvNamesDtype:
+ _dtypes_params = ["Int64", "Int64_Timestamp"]
+ _timestamp_columns = ["col1", "col2"]
+
+ param_names = ["shape", "names", "dtype"]
+ params = [
+ UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],
+ ["array-like"],
+ _dtypes_params,
+ ]
+
+ def _get_file_id(self, shape, dtype):
+ return get_shape_id(shape) + dtype
+
+ def _add_timestamp_columns(self, df):
+ df = df.copy()
+ date_column = IMPL["pandas"].date_range(
+ "2000",
+ periods=df.shape[0],
+ freq="ms",
+ )
+ for col in self._timestamp_columns:
+ df[col] = date_column
+ return df
+
+ def setup_cache(self, test_filename="io_test_file_csv_names_dtype"):
+ # filenames with a metadata of saved dataframes
+ cache = {}
+ for shape in UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]:
+ for dtype in self._dtypes_params:
+ df = generate_dataframe("pandas", "int", *shape, RAND_LOW, RAND_HIGH)
+ if dtype == "Int64_Timestamp":
+ df = self._add_timestamp_columns(df)
+
+ file_id = self._get_file_id(shape, dtype)
+ cache[file_id] = (
+ f"{test_filename}_{file_id}.csv",
+ df.columns.to_list(),
+ df.dtypes.to_dict(),
+ )
+ df.to_csv(cache[file_id][0], index=False)
+ return cache
+
+ def setup(self, cache, shape, names, dtype):
+ file_id = self._get_file_id(shape, dtype)
+ self.filename, self.names, self.dtype = cache[file_id]
+
+ self.parse_dates = None
+ if dtype == "Int64_Timestamp":
+ # cached version of dtype should not change
+ self.dtype = self.dtype.copy()
+ for col in self._timestamp_columns:
+ del self.dtype[col]
+ self.parse_dates = self._timestamp_columns
+
+ def time_read_csv_names_dtype(self, cache, shape, names, dtype):
+ execute(
+ IMPL[ASV_USE_IMPL].read_csv(
+ self.filename,
+ names=self.names,
+ header=0,
+ dtype=self.dtype,
+ parse_dates=self.parse_dates,
+ )
+ )
| {"golden_diff": "diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py\n--- a/asv_bench/benchmarks/io/csv.py\n+++ b/asv_bench/benchmarks/io/csv.py\n@@ -65,3 +65,70 @@\n test_filenames[self.shape_id], skiprows=skiprows\n )\n )\n+\n+\n+class TimeReadCsvNamesDtype:\n+ _dtypes_params = [\"Int64\", \"Int64_Timestamp\"]\n+ _timestamp_columns = [\"col1\", \"col2\"]\n+\n+ param_names = [\"shape\", \"names\", \"dtype\"]\n+ params = [\n+ UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n+ [\"array-like\"],\n+ _dtypes_params,\n+ ]\n+\n+ def _get_file_id(self, shape, dtype):\n+ return get_shape_id(shape) + dtype\n+\n+ def _add_timestamp_columns(self, df):\n+ df = df.copy()\n+ date_column = IMPL[\"pandas\"].date_range(\n+ \"2000\",\n+ periods=df.shape[0],\n+ freq=\"ms\",\n+ )\n+ for col in self._timestamp_columns:\n+ df[col] = date_column\n+ return df\n+\n+ def setup_cache(self, test_filename=\"io_test_file_csv_names_dtype\"):\n+ # filenames with a metadata of saved dataframes\n+ cache = {}\n+ for shape in UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]:\n+ for dtype in self._dtypes_params:\n+ df = generate_dataframe(\"pandas\", \"int\", *shape, RAND_LOW, RAND_HIGH)\n+ if dtype == \"Int64_Timestamp\":\n+ df = self._add_timestamp_columns(df)\n+\n+ file_id = self._get_file_id(shape, dtype)\n+ cache[file_id] = (\n+ f\"{test_filename}_{file_id}.csv\",\n+ df.columns.to_list(),\n+ df.dtypes.to_dict(),\n+ )\n+ df.to_csv(cache[file_id][0], index=False)\n+ return cache\n+\n+ def setup(self, cache, shape, names, dtype):\n+ file_id = self._get_file_id(shape, dtype)\n+ self.filename, self.names, self.dtype = cache[file_id]\n+\n+ self.parse_dates = None\n+ if dtype == \"Int64_Timestamp\":\n+ # cached version of dtype should not change\n+ self.dtype = self.dtype.copy()\n+ for col in self._timestamp_columns:\n+ del self.dtype[col]\n+ self.parse_dates = self._timestamp_columns\n+\n+ def time_read_csv_names_dtype(self, cache, shape, names, dtype):\n+ execute(\n+ IMPL[ASV_USE_IMPL].read_csv(\n+ self.filename,\n+ names=self.names,\n+ header=0,\n+ dtype=self.dtype,\n+ parse_dates=self.parse_dates,\n+ )\n+ )\n", "issue": "[ASV] add read_csv benchmark with dtype/names/parse_dates parameters\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport modin.pandas as pd\nimport numpy as np\n\nfrom ..utils import (\n generate_dataframe,\n RAND_LOW,\n RAND_HIGH,\n ASV_USE_IMPL,\n ASV_DATASET_SIZE,\n UNARY_OP_DATA_SIZE,\n IMPL,\n execute,\n get_shape_id,\n)\n\n# ray init\nif ASV_USE_IMPL == \"modin\":\n pd.DataFrame([])\n\n\nclass BaseReadCsv:\n # test data file can de created only once\n def setup_cache(self, test_filename=\"io_test_file\"):\n test_filenames = {}\n for shape in UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE]:\n shape_id = get_shape_id(shape)\n test_filenames[shape_id] = f\"{test_filename}_{shape_id}.csv\"\n df = generate_dataframe(\"pandas\", \"str_int\", *shape, RAND_LOW, RAND_HIGH)\n df.to_csv(test_filenames[shape_id], index=False)\n\n return test_filenames\n\n def setup(self, test_filenames, shape, *args, **kwargs):\n self.shape_id = get_shape_id(shape)\n\n\nclass TimeReadCsvSkiprows(BaseReadCsv):\n param_names = [\"shape\", \"skiprows\"]\n params = [\n UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE],\n [\n None,\n lambda x: x % 2,\n np.arange(1, UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE][0][0] // 10),\n np.arange(1, UNARY_OP_DATA_SIZE[ASV_DATASET_SIZE][0][0], 2),\n ],\n ]\n\n def time_skiprows(self, test_filenames, shape, skiprows):\n execute(\n IMPL[ASV_USE_IMPL].read_csv(\n test_filenames[self.shape_id], skiprows=skiprows\n )\n )\n", "path": "asv_bench/benchmarks/io/csv.py"}]} | 1,242 | 649 |
gh_patches_debug_8525 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-1091 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False positive implicit yield violation
# Bug report
<!--
Hi, thanks for submitting a bug. We appreciate that.
But, we will need some information about what's wrong to help you.
-->
## What's wrong
With common function to split iterable by chunks WPS raises `ImplicitYieldFromViolation`
<!-- Describe what is not working. Please, attach a traceback. -->
```
def chunks(value, chunk_size):
"""Yield successive n-sized chunks from l."""
for index in range(0, len(value), chunk_size):
yield value[index:index + chunk_size]
```
## How is that should be
`ImplicitYieldFromViolation` should be raised only if no other operations with iterable item.
<!-- Describe how it should work. -->
<!--
Love wemake-python-styleguide? ❤️
Do you want your bugs to be solved faster?
Please consider supporting our collective:
👉 https://opencollective.com/wemake-python-styleguide/donate
-->
</issue>
<code>
[start of wemake_python_styleguide/visitors/ast/loops.py]
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from collections import defaultdict
5 from typing import ClassVar, DefaultDict, List, Optional, Union
6
7 from typing_extensions import final
8
9 from wemake_python_styleguide.compat.aliases import AssignNodes, ForNodes
10 from wemake_python_styleguide.compat.functions import get_assign_targets
11 from wemake_python_styleguide.logic import (
12 nodes,
13 operators,
14 slices,
15 source,
16 walk,
17 )
18 from wemake_python_styleguide.logic.variables import (
19 is_valid_block_variable_definition,
20 )
21 from wemake_python_styleguide.types import AnyFor, AnyNodes
22 from wemake_python_styleguide.violations.best_practices import (
23 LambdaInsideLoopViolation,
24 LoopVariableDefinitionViolation,
25 YieldInComprehensionViolation,
26 )
27 from wemake_python_styleguide.violations.complexity import (
28 TooManyForsInComprehensionViolation,
29 )
30 from wemake_python_styleguide.violations.consistency import (
31 MultilineLoopViolation,
32 MultipleIfsInComprehensionViolation,
33 UselessContinueViolation,
34 WrongLoopIterTypeViolation,
35 )
36 from wemake_python_styleguide.violations.refactoring import (
37 ImplicitItemsIteratorViolation,
38 ImplicitSumViolation,
39 ImplicitYieldFromViolation,
40 UselessLoopElseViolation,
41 )
42 from wemake_python_styleguide.visitors import base, decorators
43
44 _AnyLoop = Union[AnyFor, ast.While]
45 _AnyComprehension = Union[
46 ast.ListComp,
47 ast.DictComp,
48 ast.SetComp,
49 ast.GeneratorExp,
50 ]
51
52
53 @final
54 @decorators.alias('visit_any_comprehension', (
55 'visit_ListComp',
56 'visit_DictComp',
57 'visit_SetComp',
58 'visit_GeneratorExp',
59 ))
60 class WrongComprehensionVisitor(base.BaseNodeVisitor):
61 """Checks comprehensions for correctness."""
62
63 _max_ifs: ClassVar[int] = 1
64 _max_fors: ClassVar[int] = 2
65
66 def __init__(self, *args, **kwargs) -> None:
67 """Creates a counter for tracked metrics."""
68 super().__init__(*args, **kwargs)
69 self._fors: DefaultDict[ast.AST, int] = defaultdict(int)
70
71 def visit_comprehension(self, node: ast.comprehension) -> None:
72 """
73 Finds multiple ``if`` and ``for`` nodes inside the comprehension.
74
75 Raises:
76 MultipleIfsInComprehensionViolation
77 TooManyForsInComprehensionViolation
78
79 """
80 self._check_ifs(node)
81 self._check_fors(node)
82 self.generic_visit(node)
83
84 def visit_any_comprehension(self, node: _AnyComprehension) -> None:
85 """
86 Finds incorrect patterns inside comprehensions.
87
88 Raises:
89 YieldInComprehensionViolation
90
91 """
92 self._check_contains_yield(node)
93 self.generic_visit(node)
94
95 def _check_ifs(self, node: ast.comprehension) -> None:
96 if len(node.ifs) > self._max_ifs:
97 # We are trying to fix line number in the report,
98 # since `comprehension` does not have this property.
99 parent = nodes.get_parent(node) or node
100 self.add_violation(MultipleIfsInComprehensionViolation(parent))
101
102 def _check_fors(self, node: ast.comprehension) -> None:
103 parent = nodes.get_parent(node)
104 self._fors[parent] = len(parent.generators) # type: ignore
105
106 def _check_contains_yield(self, node: _AnyComprehension) -> None:
107 for sub_node in ast.walk(node):
108 if isinstance(sub_node, ast.Yield):
109 self.add_violation(YieldInComprehensionViolation(node))
110
111 def _post_visit(self) -> None:
112 for node, for_count in self._fors.items():
113 if for_count > self._max_fors:
114 self.add_violation(TooManyForsInComprehensionViolation(node))
115
116
117 @final
118 @decorators.alias('visit_any_loop', (
119 'visit_For',
120 'visit_While',
121 'visit_AsyncFor',
122 ))
123 class WrongLoopVisitor(base.BaseNodeVisitor):
124 """Responsible for examining loops."""
125
126 def visit_any_loop(self, node: _AnyLoop) -> None:
127 """
128 Checks ``for`` and ``while`` loops.
129
130 Raises:
131 UselessLoopElseViolation
132 LambdaInsideLoopViolation
133 MultilineLoopViolation
134
135 """
136 self._check_loop_needs_else(node)
137 self._check_lambda_inside_loop(node)
138 self._check_useless_continue(node)
139 self._check_multiline_loop(node)
140 self.generic_visit(node)
141
142 def _does_loop_contain_node( # TODO: move, reuse in annotations.py
143 self,
144 loop: Optional[_AnyLoop],
145 to_check: ast.Break,
146 ) -> bool:
147 if loop is None:
148 return False
149
150 for inner_node in ast.walk(loop):
151 # We are checking this specific node, not just any `break`:
152 if to_check is inner_node:
153 return True
154 return False
155
156 def _has_break(self, node: _AnyLoop) -> bool:
157 closest_loop = None
158
159 for subnode in ast.walk(node):
160 if isinstance(subnode, (*ForNodes, ast.While)):
161 if subnode is not node:
162 closest_loop = subnode
163
164 if isinstance(subnode, ast.Break):
165 is_nested_break = self._does_loop_contain_node(
166 closest_loop, subnode,
167 )
168 if not is_nested_break:
169 return True
170 return False
171
172 def _check_loop_needs_else(self, node: _AnyLoop) -> None:
173 if node.orelse and not self._has_break(node):
174 self.add_violation(UselessLoopElseViolation(node))
175
176 def _check_lambda_inside_loop(self, node: _AnyLoop) -> None:
177 for subnode in node.body:
178 if walk.is_contained(subnode, (ast.Lambda,)):
179 self.add_violation(LambdaInsideLoopViolation(node))
180
181 def _check_useless_continue(self, node: _AnyLoop) -> None:
182 nodes_at_line: DefaultDict[int, List[ast.AST]] = defaultdict(list)
183 for sub_node in ast.walk(node):
184 lineno = getattr(sub_node, 'lineno', None)
185 if lineno is not None:
186 nodes_at_line[lineno].append(sub_node)
187
188 last_line = nodes_at_line[sorted(nodes_at_line.keys())[-1]]
189 if any(isinstance(last, ast.Continue) for last in last_line):
190 self.add_violation(UselessContinueViolation(node))
191
192 def _check_multiline_loop(self, node: _AnyLoop) -> None:
193 start_lineno = getattr(node, 'lineno', None)
194
195 if isinstance(node, ast.While):
196 node_to_check = node.test
197 else:
198 node_to_check = node.iter
199
200 for sub_node in ast.walk(node_to_check):
201 sub_lineno = getattr(sub_node, 'lineno', None)
202 if sub_lineno is not None and sub_lineno > start_lineno:
203 self.add_violation(MultilineLoopViolation(node))
204 break
205
206
207 @final
208 @decorators.alias('visit_any_for', (
209 'visit_For',
210 'visit_AsyncFor',
211 ))
212 class WrongLoopDefinitionVisitor(base.BaseNodeVisitor):
213 """Responsible for ``for`` loops and comprehensions definitions."""
214
215 _forbidden_for_iters: ClassVar[AnyNodes] = (
216 ast.List,
217 ast.ListComp,
218 ast.Dict,
219 ast.DictComp,
220 ast.Set,
221 ast.SetComp,
222 ast.GeneratorExp,
223 ast.Num,
224 ast.NameConstant,
225 )
226
227 def visit_any_for(self, node: AnyFor) -> None:
228 """
229 Ensures that ``for`` loop definitions are correct.
230
231 Raises:
232 LoopVariableDefinitionViolation
233 WrongLoopIterTypeViolation
234 ImplicitSumViolation
235
236 """
237 self._check_variable_definitions(node.target)
238 self._check_explicit_iter_type(node)
239 self._check_implicit_sum(node)
240 self._check_implicit_yield_from(node)
241 self.generic_visit(node)
242
243 def visit_comprehension(self, node: ast.comprehension) -> None:
244 """
245 Ensures that comprehension definitions are correct.
246
247 Raises:
248 LoopVariableDefinitionViolation
249
250 """
251 self._check_variable_definitions(node.target)
252 self._check_explicit_iter_type(node)
253 self.generic_visit(node)
254
255 def _check_variable_definitions(self, node: ast.AST) -> None:
256 if not is_valid_block_variable_definition(node):
257 self.add_violation(LoopVariableDefinitionViolation(node))
258
259 def _check_explicit_iter_type(
260 self,
261 node: Union[AnyFor, ast.comprehension],
262 ) -> None:
263 node_iter = operators.unwrap_unary_node(node.iter)
264 is_wrong = isinstance(node_iter, self._forbidden_for_iters)
265 is_empty = isinstance(node_iter, ast.Tuple) and not node_iter.elts
266 if is_wrong or is_empty:
267 self.add_violation(WrongLoopIterTypeViolation(node_iter))
268
269 def _check_implicit_sum(self, node: AnyFor) -> None:
270 is_implicit_sum = (
271 len(node.body) == 1 and
272 isinstance(node.body[0], ast.AugAssign) and
273 isinstance(node.body[0].op, ast.Add) and
274 isinstance(node.body[0].target, ast.Name)
275 )
276 if is_implicit_sum:
277 self.add_violation(ImplicitSumViolation(node))
278
279 def _check_implicit_yield_from(self, node: AnyFor) -> None:
280 is_implicit_yield_from = (
281 len(node.body) == 1 and
282 isinstance(node.body[0], ast.Expr) and
283 isinstance(node.body[0].value, ast.Yield)
284 )
285 if is_implicit_yield_from:
286 self.add_violation(ImplicitYieldFromViolation(node))
287
288
289 @final
290 class SyncForLoopVisitor(base.BaseNodeVisitor):
291 """We use this visitor to check just sync ``for`` loops."""
292
293 def visit_For(self, node: ast.For) -> None:
294 """
295 Checks for hidden patterns in sync loops.
296
297 Raises:
298 ImplicitItemsIteratorViolation
299
300 """
301 self._check_implicit_items(node)
302 self.generic_visit(node)
303
304 def _check_implicit_items(self, node: ast.For) -> None:
305 iterable = source.node_to_string(node.iter)
306 target = source.node_to_string(node.target)
307
308 for sub in ast.walk(node):
309 if isinstance(sub, ast.Subscript):
310 if self._is_assigned_target(sub):
311 continue
312
313 if slices.is_same_slice(iterable, target, sub):
314 self.add_violation(ImplicitItemsIteratorViolation(node))
315 break
316
317 def _is_assigned_target(self, node: ast.Subscript) -> bool:
318 parent = nodes.get_parent(node)
319 if not isinstance(parent, (*AssignNodes, ast.AugAssign)):
320 return False
321 return any(node == target for target in get_assign_targets(parent))
322
[end of wemake_python_styleguide/visitors/ast/loops.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/visitors/ast/loops.py b/wemake_python_styleguide/visitors/ast/loops.py
--- a/wemake_python_styleguide/visitors/ast/loops.py
+++ b/wemake_python_styleguide/visitors/ast/loops.py
@@ -277,6 +277,9 @@
self.add_violation(ImplicitSumViolation(node))
def _check_implicit_yield_from(self, node: AnyFor) -> None:
+ if isinstance(nodes.get_context(node), ast.AsyncFunctionDef):
+ # Python does not support 'yield from' inside async functions
+ return
is_implicit_yield_from = (
len(node.body) == 1 and
isinstance(node.body[0], ast.Expr) and
| {"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/loops.py b/wemake_python_styleguide/visitors/ast/loops.py\n--- a/wemake_python_styleguide/visitors/ast/loops.py\n+++ b/wemake_python_styleguide/visitors/ast/loops.py\n@@ -277,6 +277,9 @@\n self.add_violation(ImplicitSumViolation(node))\n \n def _check_implicit_yield_from(self, node: AnyFor) -> None:\n+ if isinstance(nodes.get_context(node), ast.AsyncFunctionDef):\n+ # Python does not support 'yield from' inside async functions\n+ return\n is_implicit_yield_from = (\n len(node.body) == 1 and\n isinstance(node.body[0], ast.Expr) and\n", "issue": "False positive implicit yield violation\n# Bug report\r\n\r\n<!--\r\nHi, thanks for submitting a bug. We appreciate that.\r\n\r\nBut, we will need some information about what's wrong to help you.\r\n-->\r\n\r\n## What's wrong\r\nWith common function to split iterable by chunks WPS raises `ImplicitYieldFromViolation`\r\n<!-- Describe what is not working. Please, attach a traceback. -->\r\n```\r\ndef chunks(value, chunk_size):\r\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\r\n for index in range(0, len(value), chunk_size):\r\n yield value[index:index + chunk_size]\r\n```\r\n\r\n## How is that should be\r\n`ImplicitYieldFromViolation` should be raised only if no other operations with iterable item.\r\n<!-- Describe how it should work. -->\r\n\r\n<!--\r\n\r\n Love wemake-python-styleguide? \u2764\ufe0f\r\n Do you want your bugs to be solved faster?\r\n\r\n Please consider supporting our collective:\r\n\ud83d\udc49 https://opencollective.com/wemake-python-styleguide/donate\r\n\r\n-->\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom collections import defaultdict\nfrom typing import ClassVar, DefaultDict, List, Optional, Union\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.compat.aliases import AssignNodes, ForNodes\nfrom wemake_python_styleguide.compat.functions import get_assign_targets\nfrom wemake_python_styleguide.logic import (\n nodes,\n operators,\n slices,\n source,\n walk,\n)\nfrom wemake_python_styleguide.logic.variables import (\n is_valid_block_variable_definition,\n)\nfrom wemake_python_styleguide.types import AnyFor, AnyNodes\nfrom wemake_python_styleguide.violations.best_practices import (\n LambdaInsideLoopViolation,\n LoopVariableDefinitionViolation,\n YieldInComprehensionViolation,\n)\nfrom wemake_python_styleguide.violations.complexity import (\n TooManyForsInComprehensionViolation,\n)\nfrom wemake_python_styleguide.violations.consistency import (\n MultilineLoopViolation,\n MultipleIfsInComprehensionViolation,\n UselessContinueViolation,\n WrongLoopIterTypeViolation,\n)\nfrom wemake_python_styleguide.violations.refactoring import (\n ImplicitItemsIteratorViolation,\n ImplicitSumViolation,\n ImplicitYieldFromViolation,\n UselessLoopElseViolation,\n)\nfrom wemake_python_styleguide.visitors import base, decorators\n\n_AnyLoop = Union[AnyFor, ast.While]\n_AnyComprehension = Union[\n ast.ListComp,\n ast.DictComp,\n ast.SetComp,\n ast.GeneratorExp,\n]\n\n\n@final\[email protected]('visit_any_comprehension', (\n 'visit_ListComp',\n 'visit_DictComp',\n 'visit_SetComp',\n 'visit_GeneratorExp',\n))\nclass WrongComprehensionVisitor(base.BaseNodeVisitor):\n \"\"\"Checks comprehensions for correctness.\"\"\"\n\n _max_ifs: ClassVar[int] = 1\n _max_fors: ClassVar[int] = 2\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Creates a counter for tracked metrics.\"\"\"\n super().__init__(*args, **kwargs)\n 
self._fors: DefaultDict[ast.AST, int] = defaultdict(int)\n\n def visit_comprehension(self, node: ast.comprehension) -> None:\n \"\"\"\n Finds multiple ``if`` and ``for`` nodes inside the comprehension.\n\n Raises:\n MultipleIfsInComprehensionViolation\n TooManyForsInComprehensionViolation\n\n \"\"\"\n self._check_ifs(node)\n self._check_fors(node)\n self.generic_visit(node)\n\n def visit_any_comprehension(self, node: _AnyComprehension) -> None:\n \"\"\"\n Finds incorrect patterns inside comprehensions.\n\n Raises:\n YieldInComprehensionViolation\n\n \"\"\"\n self._check_contains_yield(node)\n self.generic_visit(node)\n\n def _check_ifs(self, node: ast.comprehension) -> None:\n if len(node.ifs) > self._max_ifs:\n # We are trying to fix line number in the report,\n # since `comprehension` does not have this property.\n parent = nodes.get_parent(node) or node\n self.add_violation(MultipleIfsInComprehensionViolation(parent))\n\n def _check_fors(self, node: ast.comprehension) -> None:\n parent = nodes.get_parent(node)\n self._fors[parent] = len(parent.generators) # type: ignore\n\n def _check_contains_yield(self, node: _AnyComprehension) -> None:\n for sub_node in ast.walk(node):\n if isinstance(sub_node, ast.Yield):\n self.add_violation(YieldInComprehensionViolation(node))\n\n def _post_visit(self) -> None:\n for node, for_count in self._fors.items():\n if for_count > self._max_fors:\n self.add_violation(TooManyForsInComprehensionViolation(node))\n\n\n@final\[email protected]('visit_any_loop', (\n 'visit_For',\n 'visit_While',\n 'visit_AsyncFor',\n))\nclass WrongLoopVisitor(base.BaseNodeVisitor):\n \"\"\"Responsible for examining loops.\"\"\"\n\n def visit_any_loop(self, node: _AnyLoop) -> None:\n \"\"\"\n Checks ``for`` and ``while`` loops.\n\n Raises:\n UselessLoopElseViolation\n LambdaInsideLoopViolation\n MultilineLoopViolation\n\n \"\"\"\n self._check_loop_needs_else(node)\n self._check_lambda_inside_loop(node)\n self._check_useless_continue(node)\n self._check_multiline_loop(node)\n self.generic_visit(node)\n\n def _does_loop_contain_node( # TODO: move, reuse in annotations.py\n self,\n loop: Optional[_AnyLoop],\n to_check: ast.Break,\n ) -> bool:\n if loop is None:\n return False\n\n for inner_node in ast.walk(loop):\n # We are checking this specific node, not just any `break`:\n if to_check is inner_node:\n return True\n return False\n\n def _has_break(self, node: _AnyLoop) -> bool:\n closest_loop = None\n\n for subnode in ast.walk(node):\n if isinstance(subnode, (*ForNodes, ast.While)):\n if subnode is not node:\n closest_loop = subnode\n\n if isinstance(subnode, ast.Break):\n is_nested_break = self._does_loop_contain_node(\n closest_loop, subnode,\n )\n if not is_nested_break:\n return True\n return False\n\n def _check_loop_needs_else(self, node: _AnyLoop) -> None:\n if node.orelse and not self._has_break(node):\n self.add_violation(UselessLoopElseViolation(node))\n\n def _check_lambda_inside_loop(self, node: _AnyLoop) -> None:\n for subnode in node.body:\n if walk.is_contained(subnode, (ast.Lambda,)):\n self.add_violation(LambdaInsideLoopViolation(node))\n\n def _check_useless_continue(self, node: _AnyLoop) -> None:\n nodes_at_line: DefaultDict[int, List[ast.AST]] = defaultdict(list)\n for sub_node in ast.walk(node):\n lineno = getattr(sub_node, 'lineno', None)\n if lineno is not None:\n nodes_at_line[lineno].append(sub_node)\n\n last_line = nodes_at_line[sorted(nodes_at_line.keys())[-1]]\n if any(isinstance(last, ast.Continue) for last in last_line):\n 
self.add_violation(UselessContinueViolation(node))\n\n def _check_multiline_loop(self, node: _AnyLoop) -> None:\n start_lineno = getattr(node, 'lineno', None)\n\n if isinstance(node, ast.While):\n node_to_check = node.test\n else:\n node_to_check = node.iter\n\n for sub_node in ast.walk(node_to_check):\n sub_lineno = getattr(sub_node, 'lineno', None)\n if sub_lineno is not None and sub_lineno > start_lineno:\n self.add_violation(MultilineLoopViolation(node))\n break\n\n\n@final\[email protected]('visit_any_for', (\n 'visit_For',\n 'visit_AsyncFor',\n))\nclass WrongLoopDefinitionVisitor(base.BaseNodeVisitor):\n \"\"\"Responsible for ``for`` loops and comprehensions definitions.\"\"\"\n\n _forbidden_for_iters: ClassVar[AnyNodes] = (\n ast.List,\n ast.ListComp,\n ast.Dict,\n ast.DictComp,\n ast.Set,\n ast.SetComp,\n ast.GeneratorExp,\n ast.Num,\n ast.NameConstant,\n )\n\n def visit_any_for(self, node: AnyFor) -> None:\n \"\"\"\n Ensures that ``for`` loop definitions are correct.\n\n Raises:\n LoopVariableDefinitionViolation\n WrongLoopIterTypeViolation\n ImplicitSumViolation\n\n \"\"\"\n self._check_variable_definitions(node.target)\n self._check_explicit_iter_type(node)\n self._check_implicit_sum(node)\n self._check_implicit_yield_from(node)\n self.generic_visit(node)\n\n def visit_comprehension(self, node: ast.comprehension) -> None:\n \"\"\"\n Ensures that comprehension definitions are correct.\n\n Raises:\n LoopVariableDefinitionViolation\n\n \"\"\"\n self._check_variable_definitions(node.target)\n self._check_explicit_iter_type(node)\n self.generic_visit(node)\n\n def _check_variable_definitions(self, node: ast.AST) -> None:\n if not is_valid_block_variable_definition(node):\n self.add_violation(LoopVariableDefinitionViolation(node))\n\n def _check_explicit_iter_type(\n self,\n node: Union[AnyFor, ast.comprehension],\n ) -> None:\n node_iter = operators.unwrap_unary_node(node.iter)\n is_wrong = isinstance(node_iter, self._forbidden_for_iters)\n is_empty = isinstance(node_iter, ast.Tuple) and not node_iter.elts\n if is_wrong or is_empty:\n self.add_violation(WrongLoopIterTypeViolation(node_iter))\n\n def _check_implicit_sum(self, node: AnyFor) -> None:\n is_implicit_sum = (\n len(node.body) == 1 and\n isinstance(node.body[0], ast.AugAssign) and\n isinstance(node.body[0].op, ast.Add) and\n isinstance(node.body[0].target, ast.Name)\n )\n if is_implicit_sum:\n self.add_violation(ImplicitSumViolation(node))\n\n def _check_implicit_yield_from(self, node: AnyFor) -> None:\n is_implicit_yield_from = (\n len(node.body) == 1 and\n isinstance(node.body[0], ast.Expr) and\n isinstance(node.body[0].value, ast.Yield)\n )\n if is_implicit_yield_from:\n self.add_violation(ImplicitYieldFromViolation(node))\n\n\n@final\nclass SyncForLoopVisitor(base.BaseNodeVisitor):\n \"\"\"We use this visitor to check just sync ``for`` loops.\"\"\"\n\n def visit_For(self, node: ast.For) -> None:\n \"\"\"\n Checks for hidden patterns in sync loops.\n\n Raises:\n ImplicitItemsIteratorViolation\n\n \"\"\"\n self._check_implicit_items(node)\n self.generic_visit(node)\n\n def _check_implicit_items(self, node: ast.For) -> None:\n iterable = source.node_to_string(node.iter)\n target = source.node_to_string(node.target)\n\n for sub in ast.walk(node):\n if isinstance(sub, ast.Subscript):\n if self._is_assigned_target(sub):\n continue\n\n if slices.is_same_slice(iterable, target, sub):\n self.add_violation(ImplicitItemsIteratorViolation(node))\n break\n\n def _is_assigned_target(self, node: ast.Subscript) -> bool:\n parent = 
nodes.get_parent(node)\n if not isinstance(parent, (*AssignNodes, ast.AugAssign)):\n return False\n return any(node == target for target in get_assign_targets(parent))\n", "path": "wemake_python_styleguide/visitors/ast/loops.py"}]} | 4,030 | 177 |
gh_patches_debug_2618 | rasdani/github-patches | git_diff | pyca__cryptography-4064 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a python_requires to our setup.py
cc: @dstufft
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # This file is dual licensed under the terms of the Apache License, Version
4 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
5 # for complete details.
6
7 from __future__ import absolute_import, division, print_function
8
9 import os
10 import platform
11 import subprocess
12 import sys
13 from distutils.command.build import build
14
15 import pkg_resources
16
17 import setuptools
18 from setuptools import find_packages, setup
19 from setuptools.command.install import install
20 from setuptools.command.test import test
21
22
23 if (
24 pkg_resources.parse_version(setuptools.__version__) <
25 pkg_resources.parse_version("18.5")
26 ):
27 raise RuntimeError(
28 "cryptography requires setuptools 18.5 or newer, please upgrade to a "
29 "newer version of setuptools"
30 )
31
32 base_dir = os.path.dirname(__file__)
33 src_dir = os.path.join(base_dir, "src")
34
35 # When executing the setup.py, we need to be able to import ourselves, this
36 # means that we need to add the src/ directory to the sys.path.
37 sys.path.insert(0, src_dir)
38
39 about = {}
40 with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f:
41 exec(f.read(), about)
42
43
44 VECTORS_DEPENDENCY = "cryptography_vectors=={0}".format(about['__version__'])
45
46 setup_requirements = []
47
48 if platform.python_implementation() == "PyPy":
49 if sys.pypy_version_info < (5, 3):
50 raise RuntimeError(
51 "cryptography 1.9 is not compatible with PyPy < 5.3. Please "
52 "upgrade PyPy to use this library."
53 )
54 else:
55 setup_requirements.append("cffi>=1.7")
56
57 test_requirements = [
58 "pytest>=3.2.1,!=3.3.0",
59 "pretend",
60 "iso8601",
61 "pytz",
62 "hypothesis>=1.11.4",
63 ]
64
65
66 # If there's no vectors locally that probably means we are in a tarball and
67 # need to go and get the matching vectors package from PyPi
68 if not os.path.exists(os.path.join(base_dir, "vectors/setup.py")):
69 test_requirements.append(VECTORS_DEPENDENCY)
70
71
72 class PyTest(test):
73 def finalize_options(self):
74 test.finalize_options(self)
75 self.test_args = []
76 self.test_suite = True
77
78 # This means there's a vectors/ folder with the package in here.
79 # cd into it, install the vectors package and then refresh sys.path
80 if VECTORS_DEPENDENCY not in test_requirements:
81 subprocess.check_call(
82 [sys.executable, "setup.py", "install"], cwd="vectors"
83 )
84 pkg_resources.get_distribution("cryptography_vectors").activate()
85
86 def run_tests(self):
87 # Import here because in module scope the eggs are not loaded.
88 import pytest
89 test_args = [os.path.join(base_dir, "tests")]
90 errno = pytest.main(test_args)
91 sys.exit(errno)
92
93
94 def keywords_with_side_effects(argv):
95 """
96 Get a dictionary with setup keywords that (can) have side effects.
97
98 :param argv: A list of strings with command line arguments.
99 :returns: A dictionary with keyword arguments for the ``setup()`` function.
100
101 This setup.py script uses the setuptools 'setup_requires' feature because
102 this is required by the cffi package to compile extension modules. The
103 purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi
104 build process as a result of setup.py invocations that don't need the cffi
105 module to be built (setup.py serves the dual purpose of exposing package
106 metadata).
107
108 All of the options listed by ``python setup.py --help`` that print
109 information should be recognized here. The commands ``clean``,
110 ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.
111 Any combination of these options and commands is also supported.
112
113 This function was originally based on the `setup.py script`_ of SciPy (see
114 also the discussion in `pip issue #25`_).
115
116 .. _pip issue #25: https://github.com/pypa/pip/issues/25
117 .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py
118 """
119 no_setup_requires_arguments = (
120 '-h', '--help',
121 '-n', '--dry-run',
122 '-q', '--quiet',
123 '-v', '--verbose',
124 '-V', '--version',
125 '--author',
126 '--author-email',
127 '--classifiers',
128 '--contact',
129 '--contact-email',
130 '--description',
131 '--egg-base',
132 '--fullname',
133 '--help-commands',
134 '--keywords',
135 '--licence',
136 '--license',
137 '--long-description',
138 '--maintainer',
139 '--maintainer-email',
140 '--name',
141 '--no-user-cfg',
142 '--obsoletes',
143 '--platforms',
144 '--provides',
145 '--requires',
146 '--url',
147 'clean',
148 'egg_info',
149 'register',
150 'sdist',
151 'upload',
152 )
153
154 def is_short_option(argument):
155 """Check whether a command line argument is a short option."""
156 return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'
157
158 def expand_short_options(argument):
159 """Expand combined short options into canonical short options."""
160 return ('-' + char for char in argument[1:])
161
162 def argument_without_setup_requirements(argv, i):
163 """Check whether a command line argument needs setup requirements."""
164 if argv[i] in no_setup_requires_arguments:
165 # Simple case: An argument which is either an option or a command
166 # which doesn't need setup requirements.
167 return True
168 elif (is_short_option(argv[i]) and
169 all(option in no_setup_requires_arguments
170 for option in expand_short_options(argv[i]))):
171 # Not so simple case: Combined short options none of which need
172 # setup requirements.
173 return True
174 elif argv[i - 1:i] == ['--egg-base']:
175 # Tricky case: --egg-info takes an argument which should not make
176 # us use setup_requires (defeating the purpose of this code).
177 return True
178 else:
179 return False
180
181 if all(argument_without_setup_requirements(argv, i)
182 for i in range(1, len(argv))):
183 return {
184 "cmdclass": {
185 "build": DummyBuild,
186 "install": DummyInstall,
187 "test": DummyPyTest,
188 }
189 }
190 else:
191 cffi_modules = [
192 "src/_cffi_src/build_openssl.py:ffi",
193 "src/_cffi_src/build_constant_time.py:ffi",
194 "src/_cffi_src/build_padding.py:ffi",
195 ]
196
197 return {
198 "setup_requires": setup_requirements,
199 "cmdclass": {
200 "test": PyTest,
201 },
202 "cffi_modules": cffi_modules
203 }
204
205
206 setup_requires_error = ("Requested setup command that needs 'setup_requires' "
207 "while command line arguments implied a side effect "
208 "free command or option.")
209
210
211 class DummyBuild(build):
212 """
213 This class makes it very obvious when ``keywords_with_side_effects()`` has
214 incorrectly interpreted the command line arguments to ``setup.py build`` as
215 one of the 'side effect free' commands or options.
216 """
217
218 def run(self):
219 raise RuntimeError(setup_requires_error)
220
221
222 class DummyInstall(install):
223 """
224 This class makes it very obvious when ``keywords_with_side_effects()`` has
225 incorrectly interpreted the command line arguments to ``setup.py install``
226 as one of the 'side effect free' commands or options.
227 """
228
229 def run(self):
230 raise RuntimeError(setup_requires_error)
231
232
233 class DummyPyTest(test):
234 """
235 This class makes it very obvious when ``keywords_with_side_effects()`` has
236 incorrectly interpreted the command line arguments to ``setup.py test`` as
237 one of the 'side effect free' commands or options.
238 """
239
240 def run_tests(self):
241 raise RuntimeError(setup_requires_error)
242
243
244 with open(os.path.join(base_dir, "README.rst")) as f:
245 long_description = f.read()
246
247
248 setup(
249 name=about["__title__"],
250 version=about["__version__"],
251
252 description=about["__summary__"],
253 long_description=long_description,
254 license=about["__license__"],
255 url=about["__uri__"],
256
257 author=about["__author__"],
258 author_email=about["__email__"],
259
260 classifiers=[
261 "Intended Audience :: Developers",
262 "License :: OSI Approved :: Apache Software License",
263 "License :: OSI Approved :: BSD License",
264 "Natural Language :: English",
265 "Operating System :: MacOS :: MacOS X",
266 "Operating System :: POSIX",
267 "Operating System :: POSIX :: BSD",
268 "Operating System :: POSIX :: Linux",
269 "Operating System :: Microsoft :: Windows",
270 "Programming Language :: Python",
271 "Programming Language :: Python :: 2",
272 "Programming Language :: Python :: 2.7",
273 "Programming Language :: Python :: 3",
274 "Programming Language :: Python :: 3.4",
275 "Programming Language :: Python :: 3.5",
276 "Programming Language :: Python :: 3.6",
277 "Programming Language :: Python :: Implementation :: CPython",
278 "Programming Language :: Python :: Implementation :: PyPy",
279 "Topic :: Security :: Cryptography",
280 ],
281
282 package_dir={"": "src"},
283 packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]),
284 include_package_data=True,
285
286 install_requires=[
287 "idna >= 2.1",
288 "asn1crypto >= 0.21.0",
289 "six >= 1.4.1",
290 ],
291 tests_require=test_requirements,
292 extras_require={
293 ":python_version < '3'": ["enum34", "ipaddress"],
294 ":platform_python_implementation != 'PyPy'": ["cffi >= 1.7"],
295
296 "test": test_requirements,
297 "docstest": [
298 "doc8",
299 "pyenchant >= 1.6.11",
300 "readme_renderer >= 16.0",
301 "sphinx >= 1.6.5",
302 "sphinx_rtd_theme",
303 "sphinxcontrib-spelling >= 4.0.1",
304 ],
305 "pep8test": [
306 "flake8",
307 "flake8-import-order",
308 "pep8-naming",
309 ],
310 },
311
312 # for cffi
313 zip_safe=False,
314 ext_package="cryptography.hazmat.bindings",
315 **keywords_with_side_effects(sys.argv)
316 )
317
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -283,6 +283,8 @@
packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]),
include_package_data=True,
+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
+
install_requires=[
"idna >= 2.1",
"asn1crypto >= 0.21.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -283,6 +283,8 @@\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n \n+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n+\n install_requires=[\n \"idna >= 2.1\",\n \"asn1crypto >= 0.21.0\",\n", "issue": "Add a python_requires to our setup.py\ncc: @dstufft \n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nimport setuptools\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nif (\n pkg_resources.parse_version(setuptools.__version__) <\n pkg_resources.parse_version(\"18.5\")\n):\n raise RuntimeError(\n \"cryptography requires setuptools 18.5 or newer, please upgrade to a \"\n \"newer version of setuptools\"\n )\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nsetup_requirements = []\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (5, 3):\n raise RuntimeError(\n \"cryptography 1.9 is not compatible with PyPy < 5.3. Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n setup_requirements.append(\"cffi>=1.7\")\n\ntest_requirements = [\n \"pytest>=3.2.1,!=3.3.0\",\n \"pretend\",\n \"iso8601\",\n \"pytz\",\n \"hypothesis>=1.11.4\",\n]\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. 
The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when 
``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n install_requires=[\n \"idna >= 2.1\",\n \"asn1crypto >= 0.21.0\",\n \"six >= 1.4.1\",\n ],\n tests_require=test_requirements,\n extras_require={\n \":python_version < '3'\": [\"enum34\", \"ipaddress\"],\n \":platform_python_implementation != 'PyPy'\": [\"cffi >= 1.7\"],\n\n \"test\": test_requirements,\n \"docstest\": [\n \"doc8\",\n \"pyenchant >= 1.6.11\",\n \"readme_renderer >= 16.0\",\n \"sphinx >= 1.6.5\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling >= 4.0.1\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}]} | 3,735 | 122 |
gh_patches_debug_1317 | rasdani/github-patches | git_diff | sopel-irc__sopel-1325 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bugzilla] Error calling shutdown method for module bugzilla:None
Noticed this in my logs. Bugzilla shutdown throwing none. On Sopel 6.5.3, Python 3.5.3.
```
Ping timeout reached after 120 seconds, closing connection
Calling shutdown for 2 modules.
calling reddit.shutdown
calling bugzilla.shutdown
Error calling shutdown method for module bugzilla:None
Closed!
Warning: Disconnected. Reconnecting in 20 seconds...
Welcome to Sopel. Loading modules...
```
</issue>
<code>
[start of sopel/modules/bugzilla.py]
1 # coding=utf-8
2 """Bugzilla issue reporting module
3
4 Copyright 2013-2015, Embolalia, embolalia.com
5 Licensed under the Eiffel Forum License 2.
6 """
7 from __future__ import unicode_literals, absolute_import, print_function, division
8
9 import re
10
11 import xmltodict
12
13 from sopel import web, tools
14 from sopel.config.types import StaticSection, ListAttribute
15 from sopel.logger import get_logger
16 from sopel.module import rule
17
18
19 regex = None
20 LOGGER = get_logger(__name__)
21
22
23 class BugzillaSection(StaticSection):
24 domains = ListAttribute('domains')
25 """The domains of the Bugzilla instances from which to get information."""
26
27
28 def configure(config):
29 config.define_section('bugzilla', BugzillaSection)
30 config.bugzilla.configure_setting(
31 'domains',
32 'Enter the domains of the Bugzillas you want extra information '
33 'from (e.g. bugzilla.gnome.org)'
34 )
35
36
37 def setup(bot):
38 global regex
39 bot.config.define_section('bugzilla', BugzillaSection)
40
41 if not bot.config.bugzilla.domains:
42 return
43 if not bot.memory.contains('url_callbacks'):
44 bot.memory['url_callbacks'] = tools.SopelMemory()
45
46 domains = '|'.join(bot.config.bugzilla.domains)
47 regex = re.compile((r'https?://(%s)'
48 '(/show_bug.cgi\?\S*?)'
49 '(id=\d+)')
50 % domains)
51 bot.memory['url_callbacks'][regex] = show_bug
52
53
54 def shutdown(bot):
55 del bot.memory['url_callbacks'][regex]
56
57
58 @rule(r'.*https?://(\S+?)'
59 '(/show_bug.cgi\?\S*?)'
60 '(id=\d+).*')
61 def show_bug(bot, trigger, match=None):
62 """Show information about a Bugzilla bug."""
63 match = match or trigger
64 domain = match.group(1)
65 if domain not in bot.config.bugzilla.domains:
66 return
67 url = 'https://%s%sctype=xml&%s' % match.groups()
68 data = web.get(url, dont_decode=True)
69 bug = xmltodict.parse(data).get('bugzilla').get('bug')
70 error = bug.get('@error', None) # error="NotPermitted"
71
72 if error:
73 LOGGER.warning('Bugzilla error: %s' % error)
74 bot.say('[BUGZILLA] Unable to get infomation for '
75 'linked bug (%s)' % error)
76 return
77
78 message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' +
79 'Importance: %s | Status: %s | Assigned to: %s | ' +
80 'Reported: %s | Modified: %s')
81
82 resolution = bug.get('resolution')
83 if resolution is not None:
84 status = bug.get('bug_status') + ' ' + resolution
85 else:
86 status = bug.get('bug_status')
87
88 assigned_to = bug.get('assigned_to')
89 if isinstance(assigned_to, dict):
90 assigned_to = assigned_to.get('@name')
91
92 message = message % (
93 bug.get('short_desc'), bug.get('product'),
94 bug.get('component'), bug.get('version'),
95 (bug.get('priority') + ' ' + bug.get('bug_severity')),
96 status, assigned_to, bug.get('creation_ts'),
97 bug.get('delta_ts'))
98 bot.say(message)
99
[end of sopel/modules/bugzilla.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/bugzilla.py b/sopel/modules/bugzilla.py
--- a/sopel/modules/bugzilla.py
+++ b/sopel/modules/bugzilla.py
@@ -52,7 +52,12 @@
def shutdown(bot):
- del bot.memory['url_callbacks'][regex]
+ try:
+ del bot.memory['url_callbacks'][regex]
+ except KeyError:
+ # bot.config.bugzilla.domains was probably just empty on startup
+ # everything's daijoubu
+ pass
@rule(r'.*https?://(\S+?)'
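A note on the hunk above: when `bot.config.bugzilla.domains` is empty, `setup()` returns before registering anything, so the module-level `regex` stays `None` and no callback is ever registered; the unconditional `del` in `shutdown()` then raises `KeyError`, which matches the "Error calling shutdown method for module bugzilla:None" log line. An equivalent defensive variant, shown only as a sketch rather than the merged fix:

```python
def shutdown(bot):
    # pop() with a default is a no-op when the callback was never registered,
    # so shutdown no longer raises on bots with no Bugzilla domains configured.
    bot.memory.get('url_callbacks', {}).pop(regex, None)
```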
| {"golden_diff": "diff --git a/sopel/modules/bugzilla.py b/sopel/modules/bugzilla.py\n--- a/sopel/modules/bugzilla.py\n+++ b/sopel/modules/bugzilla.py\n@@ -52,7 +52,12 @@\n \n \n def shutdown(bot):\n- del bot.memory['url_callbacks'][regex]\n+ try:\n+ del bot.memory['url_callbacks'][regex]\n+ except KeyError:\n+ # bot.config.bugzilla.domains was probably just empty on startup\n+ # everything's daijoubu\n+ pass\n \n \n @rule(r'.*https?://(\\S+?)'\n", "issue": "[Bugzilla] Error calling shutdown method for module bugzilla:None\nNoticed this in my logs. Bugzilla shutdown throwing none. On Sopel 6.5.3, Python 3.5.3.\r\n\r\n```\r\nPing timeout reached after 120 seconds, closing connection\r\nCalling shutdown for 2 modules.\r\ncalling reddit.shutdown\r\ncalling bugzilla.shutdown\r\nError calling shutdown method for module bugzilla:None\r\nClosed!\r\nWarning: Disconnected. Reconnecting in 20 seconds...\r\n \r\nWelcome to Sopel. Loading modules...\r\n```\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Bugzilla issue reporting module\n\nCopyright 2013-2015, Embolalia, embolalia.com\nLicensed under the Eiffel Forum License 2.\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nimport xmltodict\n\nfrom sopel import web, tools\nfrom sopel.config.types import StaticSection, ListAttribute\nfrom sopel.logger import get_logger\nfrom sopel.module import rule\n\n\nregex = None\nLOGGER = get_logger(__name__)\n\n\nclass BugzillaSection(StaticSection):\n domains = ListAttribute('domains')\n \"\"\"The domains of the Bugzilla instances from which to get information.\"\"\"\n\n\ndef configure(config):\n config.define_section('bugzilla', BugzillaSection)\n config.bugzilla.configure_setting(\n 'domains',\n 'Enter the domains of the Bugzillas you want extra information '\n 'from (e.g. 
bugzilla.gnome.org)'\n )\n\n\ndef setup(bot):\n global regex\n bot.config.define_section('bugzilla', BugzillaSection)\n\n if not bot.config.bugzilla.domains:\n return\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n\n domains = '|'.join(bot.config.bugzilla.domains)\n regex = re.compile((r'https?://(%s)'\n '(/show_bug.cgi\\?\\S*?)'\n '(id=\\d+)')\n % domains)\n bot.memory['url_callbacks'][regex] = show_bug\n\n\ndef shutdown(bot):\n del bot.memory['url_callbacks'][regex]\n\n\n@rule(r'.*https?://(\\S+?)'\n '(/show_bug.cgi\\?\\S*?)'\n '(id=\\d+).*')\ndef show_bug(bot, trigger, match=None):\n \"\"\"Show information about a Bugzilla bug.\"\"\"\n match = match or trigger\n domain = match.group(1)\n if domain not in bot.config.bugzilla.domains:\n return\n url = 'https://%s%sctype=xml&%s' % match.groups()\n data = web.get(url, dont_decode=True)\n bug = xmltodict.parse(data).get('bugzilla').get('bug')\n error = bug.get('@error', None) # error=\"NotPermitted\"\n\n if error:\n LOGGER.warning('Bugzilla error: %s' % error)\n bot.say('[BUGZILLA] Unable to get infomation for '\n 'linked bug (%s)' % error)\n return\n\n message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' +\n 'Importance: %s | Status: %s | Assigned to: %s | ' +\n 'Reported: %s | Modified: %s')\n\n resolution = bug.get('resolution')\n if resolution is not None:\n status = bug.get('bug_status') + ' ' + resolution\n else:\n status = bug.get('bug_status')\n\n assigned_to = bug.get('assigned_to')\n if isinstance(assigned_to, dict):\n assigned_to = assigned_to.get('@name')\n\n message = message % (\n bug.get('short_desc'), bug.get('product'),\n bug.get('component'), bug.get('version'),\n (bug.get('priority') + ' ' + bug.get('bug_severity')),\n status, assigned_to, bug.get('creation_ts'),\n bug.get('delta_ts'))\n bot.say(message)\n", "path": "sopel/modules/bugzilla.py"}]} | 1,611 | 138 |
gh_patches_debug_50359 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-5239 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gevent hook unnecessarily bundles ~17MB of "stuff"
## Description of the issue
The included `gevent` hook bundles about 17MB of unnecessary files.
These include:
- HTML documentation
- `__pycache__` folders
- tests
- Generated `.c` and `.h` files
## Context information (for bug reports)
* Output of `pyinstaller --version`: `3.6`
* Version of Python: `3.7.7`
* Platform: macOS 10.15.4 / Ubuntu 18.04
### A minimal example program which shows the error
`test.py`:
```
import gevent
```
Commands
```shell
$ pip install gevent pyinstaller
[...]
$ pyinstaller -D test.py
[...]
$ find dist/test/gevent -name "*.c" -or -name "*.h" -or -name "*.html" -or -path "*/__pycache__/*" -or -path "*/tests/*" | xargs du -shc
[...]
17M total
```
</issue>
<code>
[start of PyInstaller/hooks/hook-gevent.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2015-2020, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 from PyInstaller.utils.hooks import collect_all
13
14 excludedimports = ["gevent.testing", "gevent.tests"]
15
16 datas, binaries, hiddenimports = collect_all(
17 'gevent',
18 filter_submodules=lambda name: (
19 "gevent.testing" not in name or "gevent.tests" not in name))
20
[end of PyInstaller/hooks/hook-gevent.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-gevent.py b/PyInstaller/hooks/hook-gevent.py
--- a/PyInstaller/hooks/hook-gevent.py
+++ b/PyInstaller/hooks/hook-gevent.py
@@ -16,4 +16,6 @@
datas, binaries, hiddenimports = collect_all(
'gevent',
filter_submodules=lambda name: (
- "gevent.testing" not in name or "gevent.tests" not in name))
+ "gevent.testing" not in name or "gevent.tests" not in name),
+ include_py_files=False,
+ exclude_datas=["**/tests"])
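For readers unfamiliar with the hook API: `collect_all` accepts filtering arguments, and the two added above do the heavy lifting. A minimal sketch of the same idea in isolation (argument values taken from the diff; everything else is boilerplate):

```python
from PyInstaller.utils.hooks import collect_all

# include_py_files=False keeps Python source files out of the collected data,
# and exclude_datas drops anything under a tests/ directory.
datas, binaries, hiddenimports = collect_all(
    'gevent',
    include_py_files=False,
    exclude_datas=["**/tests"],
)
```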
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-gevent.py b/PyInstaller/hooks/hook-gevent.py\n--- a/PyInstaller/hooks/hook-gevent.py\n+++ b/PyInstaller/hooks/hook-gevent.py\n@@ -16,4 +16,6 @@\n datas, binaries, hiddenimports = collect_all(\n 'gevent',\n filter_submodules=lambda name: (\n- \"gevent.testing\" not in name or \"gevent.tests\" not in name))\n+ \"gevent.testing\" not in name or \"gevent.tests\" not in name),\n+ include_py_files=False,\n+ exclude_datas=[\"**/tests\"])\n", "issue": "gevent hook unnecessarily bundles ~17MB of \"stuff\"\n## Description of the issue\r\n\r\nThe included `gevent` hook bundles about 17MB of unnecessary files.\r\nThese include:\r\n- HTML documentation\r\n- `__pycache__` folders\r\n- tests\r\n- Generated `.c` and `.h` files\r\n\r\n## Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: `3.6`\r\n* Version of Python: `3.7.7`\r\n* Platform: macOS 10.15.4 / Ubuntu 18.04\r\n\r\n### A minimal example program which shows the error\r\n\r\n`test.py`:\r\n```\r\nimport gevent\r\n```\r\n\r\nCommands\r\n```shell\r\n$ pip install gevent pyinstaller\r\n[...]\r\n$ pyinstaller -D test.py\r\n[...]\r\n$ find dist/test/gevent -name \"*.c\" -or -name \"*.h\" -or -name \"*.html\" -or -path \"*/__pycache__/*\" -or -path \"*/tests/*\" | xargs du -shc\r\n[...]\r\n 17M\ttotal\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2015-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_all\n\nexcludedimports = [\"gevent.testing\", \"gevent.tests\"]\n\ndatas, binaries, hiddenimports = collect_all(\n 'gevent',\n filter_submodules=lambda name: (\n \"gevent.testing\" not in name or \"gevent.tests\" not in name))\n", "path": "PyInstaller/hooks/hook-gevent.py"}]} | 972 | 143 |
gh_patches_debug_8955 | rasdani/github-patches | git_diff | google__jax-13884 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip install failed and got UnicodeDecodeError for jax>=0.3.15 on Windows, Python 3.9.13
### Description
I got `UnicodeDecodeError` when running pip install for jax `0.3.15` on Windows 10:
```cmd
PS C:\tensorflow> pip install --no-cache-dir jax==0.3.15
Collecting jax==0.3.15
Downloading jax-0.3.15.tar.gz (1.0 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.0/1.0 MB 3.5 MB/s eta 0:00:00
Preparing metadata (setup.py) ... error
error: subprocess-exited-with-error
× python setup.py egg_info did not run successfully.
│ exit code: 1
╰─> [6 lines of output]
Traceback (most recent call last):
File "<string>", line 2, in <module>
File "<pip-setuptools-caller>", line 34, in <module>
File "C:\Users\Jason Wu\AppData\Local\Temp\pip-install-5rpko2f6\jax_4375f8ec13e046228768476f1ee3a194\setup.py", line 33, in <module>
_long_description = f.read()
UnicodeDecodeError: 'cp950' codec can't decode byte 0xe2 in position 1304: illegal multibyte sequence
[end of output]
note: This error originates from a subprocess, and is likely not a problem with pip.
error: metadata-generation-failed
× Encountered error while generating package metadata.
╰─> See above for output.
note: This is an issue with the package mentioned above, not pip.
hint: See above for details.
```
Also I tried `0.4.1` and `0.3.25`, both got the same error:
```cmd
PS C:\tensorflow> pip install --no-cache-dir jax==0.4.1
Collecting jax==0.4.1
Downloading jax-0.4.1.tar.gz (1.2 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.2/1.2 MB 3.6 MB/s eta 0:00:00
Preparing metadata (setup.py) ... error
error: subprocess-exited-with-error
× python setup.py egg_info did not run successfully.
│ exit code: 1
╰─> [6 lines of output]
Traceback (most recent call last):
File "<string>", line 2, in <module>
File "<pip-setuptools-caller>", line 34, in <module>
File "C:\Users\Jason Wu\AppData\Local\Temp\pip-install-trctdimm\jax_3d064f6acc4d4a0b91bcebef44bb0eb7\setup.py", line 38, in <module>
_long_description = f.read()
UnicodeDecodeError: 'cp950' codec can't decode byte 0xe2 in position 1301: illegal multibyte sequence
[end of output]
note: This error originates from a subprocess, and is likely not a problem with pip.
error: metadata-generation-failed
× Encountered error while generating package metadata.
╰─> See above for output.
note: This is an issue with the package mentioned above, not pip.
hint: See above for details.
```
```cmd
PS C:\tensorflow> pip install --no-cache-dir jax==0.3.25
Collecting jax==0.3.25
Downloading jax-0.3.25.tar.gz (1.1 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.1/1.1 MB 4.2 MB/s eta 0:00:00
Preparing metadata (setup.py) ... error
error: subprocess-exited-with-error
× python setup.py egg_info did not run successfully.
│ exit code: 1
╰─> [6 lines of output]
Traceback (most recent call last):
File "<string>", line 2, in <module>
File "<pip-setuptools-caller>", line 34, in <module>
File "C:\Users\Jason Wu\AppData\Local\Temp\pip-install-h_ymbbfo\jax_9f7e4365d3a24fb4bb4e74d1b86678e6\setup.py", line 38, in <module>
_long_description = f.read()
UnicodeDecodeError: 'cp950' codec can't decode byte 0xe2 in position 1304: illegal multibyte sequence
[end of output]
note: This error originates from a subprocess, and is likely not a problem with pip.
error: metadata-generation-failed
× Encountered error while generating package metadata.
╰─> See above for output.
note: This is an issue with the package mentioned above, not pip.
hint: See above for details.
```
It's worth noting that `0.3.14` installed without issue:
```cmd
PS C:\tensorflow> pip install --no-cache-dir jax==0.3.14
Collecting jax==0.3.14
Downloading jax-0.3.14.tar.gz (990 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 990.1/990.1 kB 2.6 MB/s eta 0:00:00
Preparing metadata (setup.py) ... done
Requirement already satisfied: absl-py ...
...
Building wheels for collected packages: jax
Building wheel for jax (setup.py) ... done
Created wheel for jax: filename=jax-0.3.14-py3-none-any.whl size=1147576 sha256=ae0aadd2688c543930de9ba7e31e58b3363a8752ae25e4770941b53e1c3476d6
Stored in directory: C:\Users\Jason Wu\AppData\Local\Temp\pip-ephem-wheel-cache-q92x7zxq\wheels\3d\22\cf\75c40ec058e8d0f9b8427cd35366e1fd618475451fc8fc6fd7
Successfully built jax
Installing collected packages: jax
Successfully installed jax-0.3.14
```
Since I'm building Tensorflow from source, it requires Jax > `0.3.15`.
Please let me know if there is any other information that needs to be provided. thanks!
### What jax/jaxlib version are you using?
jax v0.3.15
### Which accelerator(s) are you using?
CPU
### Additional system info
Python 3.9.13, pip 22.3.1, Windows 10,
### NVIDIA GPU info
_No response_
</issue>
<code>
[start of setup.py]
1 # Copyright 2018 The JAX Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from distutils import spawn
16 import subprocess
17 import os
18 import sys
19
20 from setuptools import setup, find_packages
21
22 _current_jaxlib_version = '0.4.1'
23 # The following should be updated with each new jaxlib release.
24 _latest_jaxlib_version_on_pypi = '0.4.1'
25 _available_cuda_versions = ['11']
26 _default_cuda_version = '11'
27 _available_cudnn_versions = ['82', '86']
28 _default_cudnn_version = '86'
29 _libtpu_version = '0.1.dev20221212'
30
31 _dct = {}
32 with open('jax/version.py') as f:
33 exec(f.read(), _dct)
34 __version__ = _dct['__version__']
35 _minimum_jaxlib_version = _dct['_minimum_jaxlib_version']
36
37 with open('README.md') as f:
38 _long_description = f.read()
39
40 if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
41 protoc = os.environ['PROTOC']
42 else:
43 protoc = spawn.find_executable('protoc')
44
45 def generate_proto(source):
46 if not protoc or not os.path.exists(source):
47 return
48 protoc_command = [protoc, '-I.', '--python_out=.', source]
49 if subprocess.call(protoc_command) != 0:
50 sys.exit(-1)
51
52 generate_proto("jax/experimental/australis/executable.proto")
53 generate_proto("jax/experimental/australis/petri.proto")
54
55 setup(
56 name='jax',
57 version=__version__,
58 description='Differentiate, compile, and transform Numpy code.',
59 long_description=_long_description,
60 long_description_content_type='text/markdown',
61 author='JAX team',
62 author_email='[email protected]',
63 packages=find_packages(exclude=["examples"]),
64 package_data={'jax': ['py.typed', "*.pyi", "**/*.pyi"]},
65 python_requires='>=3.8',
66 install_requires=[
67 'numpy>=1.20',
68 'opt_einsum',
69 'scipy>=1.5',
70 ],
71 extras_require={
72 # Minimum jaxlib version; used in testing.
73 'minimum-jaxlib': [f'jaxlib=={_minimum_jaxlib_version}'],
74
75 # CPU-only jaxlib can be installed via:
76 # $ pip install jax[cpu]
77 'cpu': [f'jaxlib=={_current_jaxlib_version}'],
78
79 # Used only for CI builds that install JAX from github HEAD.
80 'ci': [f'jaxlib=={_latest_jaxlib_version_on_pypi}'],
81
82 # Cloud TPU VM jaxlib can be installed via:
83 # $ pip install jax[tpu] -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
84 'tpu': [f'jaxlib=={_current_jaxlib_version}',
85 f'libtpu-nightly=={_libtpu_version}',
86 # Required by cloud_tpu_init.py
87 'requests'],
88
89 # $ pip install jax[australis]
90 'australis': ['protobuf>=3.13,<4'],
91
92 # CUDA installations require adding jax releases URL; e.g.
93 # Cuda installation defaulting to a CUDA and Cudnn version defined above.
94 # $ pip install jax[cuda] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
95 'cuda': [f"jaxlib=={_current_jaxlib_version}+cuda{_default_cuda_version}.cudnn{_default_cudnn_version}"],
96
97 # CUDA installations require adding jax releases URL; e.g.
98 # $ pip install jax[cuda11_cudnn82] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
99 # $ pip install jax[cuda11_cudnn86] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
100 **{f'cuda{cuda_version}_cudnn{cudnn_version}': f"jaxlib=={_current_jaxlib_version}+cuda{cuda_version}.cudnn{cudnn_version}"
101 for cuda_version in _available_cuda_versions for cudnn_version in _available_cudnn_versions}
102 },
103 url='https://github.com/google/jax',
104 license='Apache-2.0',
105 classifiers=[
106 "Programming Language :: Python :: 3.8",
107 "Programming Language :: Python :: 3.9",
108 "Programming Language :: Python :: 3.10",
109 "Programming Language :: Python :: 3.11",
110 ],
111 zip_safe=False,
112 )
113
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,12 +29,12 @@
_libtpu_version = '0.1.dev20221212'
_dct = {}
-with open('jax/version.py') as f:
+with open('jax/version.py', encoding='utf-8') as f:
exec(f.read(), _dct)
__version__ = _dct['__version__']
_minimum_jaxlib_version = _dct['_minimum_jaxlib_version']
-with open('README.md') as f:
+with open('README.md', encoding='utf-8') as f:
_long_description = f.read()
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,12 +29,12 @@\n _libtpu_version = '0.1.dev20221212'\n \n _dct = {}\n-with open('jax/version.py') as f:\n+with open('jax/version.py', encoding='utf-8') as f:\n exec(f.read(), _dct)\n __version__ = _dct['__version__']\n _minimum_jaxlib_version = _dct['_minimum_jaxlib_version']\n \n-with open('README.md') as f:\n+with open('README.md', encoding='utf-8') as f:\n _long_description = f.read()\n \n if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):\n", "issue": "pip install failed and got UnicodeDecodeError for jax>=0.3.15 on Windows, Python 3.9.13\n### Description\n\nI got `UnicodeDecodeError` when running pip install for jax `0.3.15` on Windows 10:\r\n```cmd\r\nPS C:\\tensorflow> pip install --no-cache-dir jax==0.3.15\r\nCollecting jax==0.3.15\r\n Downloading jax-0.3.15.tar.gz (1.0 MB)\r\n \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 1.0/1.0 MB 3.5 MB/s eta 0:00:00\r\n Preparing metadata (setup.py) ... error\r\n error: subprocess-exited-with-error\r\n\r\n \u00d7 python setup.py egg_info did not run successfully.\r\n \u2502 exit code: 1\r\n \u2570\u2500> [6 lines of output]\r\n Traceback (most recent call last):\r\n File \"<string>\", line 2, in <module>\r\n File \"<pip-setuptools-caller>\", line 34, in <module>\r\n File \"C:\\Users\\Jason Wu\\AppData\\Local\\Temp\\pip-install-5rpko2f6\\jax_4375f8ec13e046228768476f1ee3a194\\setup.py\", line 33, in <module>\r\n _long_description = f.read()\r\n UnicodeDecodeError: 'cp950' codec can't decode byte 0xe2 in position 1304: illegal multibyte sequence\r\n [end of output]\r\n\r\n note: This error originates from a subprocess, and is likely not a problem with pip.\r\nerror: metadata-generation-failed\r\n\r\n\u00d7 Encountered error while generating package metadata.\r\n\u2570\u2500> See above for output.\r\n\r\nnote: This is an issue with the package mentioned above, not pip.\r\nhint: See above for details.\r\n```\r\n\r\nAlso I tried `0.4.1` and `0.3.25`, both got the same error:\r\n```cmd\r\nPS C:\\tensorflow> pip install --no-cache-dir jax==0.4.1\r\nCollecting jax==0.4.1\r\n Downloading jax-0.4.1.tar.gz (1.2 MB)\r\n \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 1.2/1.2 MB 3.6 MB/s eta 0:00:00\r\n Preparing metadata (setup.py) ... 
error\r\n error: subprocess-exited-with-error\r\n\r\n \u00d7 python setup.py egg_info did not run successfully.\r\n \u2502 exit code: 1\r\n \u2570\u2500> [6 lines of output]\r\n Traceback (most recent call last):\r\n File \"<string>\", line 2, in <module>\r\n File \"<pip-setuptools-caller>\", line 34, in <module>\r\n File \"C:\\Users\\Jason Wu\\AppData\\Local\\Temp\\pip-install-trctdimm\\jax_3d064f6acc4d4a0b91bcebef44bb0eb7\\setup.py\", line 38, in <module>\r\n _long_description = f.read()\r\n UnicodeDecodeError: 'cp950' codec can't decode byte 0xe2 in position 1301: illegal multibyte sequence\r\n [end of output]\r\n\r\n note: This error originates from a subprocess, and is likely not a problem with pip.\r\nerror: metadata-generation-failed\r\n\r\n\u00d7 Encountered error while generating package metadata.\r\n\u2570\u2500> See above for output.\r\n\r\nnote: This is an issue with the package mentioned above, not pip.\r\nhint: See above for details.\r\n```\r\n```cmd\r\nPS C:\\tensorflow> pip install --no-cache-dir jax==0.3.25\r\nCollecting jax==0.3.25\r\n Downloading jax-0.3.25.tar.gz (1.1 MB)\r\n \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 1.1/1.1 MB 4.2 MB/s eta 0:00:00\r\n Preparing metadata (setup.py) ... error\r\n error: subprocess-exited-with-error\r\n\r\n \u00d7 python setup.py egg_info did not run successfully.\r\n \u2502 exit code: 1\r\n \u2570\u2500> [6 lines of output]\r\n Traceback (most recent call last):\r\n File \"<string>\", line 2, in <module>\r\n File \"<pip-setuptools-caller>\", line 34, in <module>\r\n File \"C:\\Users\\Jason Wu\\AppData\\Local\\Temp\\pip-install-h_ymbbfo\\jax_9f7e4365d3a24fb4bb4e74d1b86678e6\\setup.py\", line 38, in <module>\r\n _long_description = f.read()\r\n UnicodeDecodeError: 'cp950' codec can't decode byte 0xe2 in position 1304: illegal multibyte sequence\r\n [end of output]\r\n\r\n note: This error originates from a subprocess, and is likely not a problem with pip.\r\nerror: metadata-generation-failed\r\n\r\n\u00d7 Encountered error while generating package metadata.\r\n\u2570\u2500> See above for output.\r\n\r\nnote: This is an issue with the package mentioned above, not pip.\r\nhint: See above for details.\r\n```\r\n\r\nIt's worth noting that `0.3.14` installed without issue:\r\n```cmd\r\nPS C:\\tensorflow> pip install --no-cache-dir jax==0.3.14\r\nCollecting jax==0.3.14\r\n Downloading jax-0.3.14.tar.gz (990 kB)\r\n \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 990.1/990.1 kB 2.6 MB/s eta 0:00:00\r\n Preparing metadata (setup.py) ... done\r\nRequirement already satisfied: absl-py ...\r\n...\r\nBuilding wheels for collected packages: jax\r\n Building wheel for jax (setup.py) ... 
done\r\n Created wheel for jax: filename=jax-0.3.14-py3-none-any.whl size=1147576 sha256=ae0aadd2688c543930de9ba7e31e58b3363a8752ae25e4770941b53e1c3476d6\r\n Stored in directory: C:\\Users\\Jason Wu\\AppData\\Local\\Temp\\pip-ephem-wheel-cache-q92x7zxq\\wheels\\3d\\22\\cf\\75c40ec058e8d0f9b8427cd35366e1fd618475451fc8fc6fd7\r\nSuccessfully built jax\r\nInstalling collected packages: jax\r\nSuccessfully installed jax-0.3.14\r\n```\r\n\r\nSince I'm building Tensorflow from source, it requires Jax > `0.3.15`.\r\n\r\nPlease let me know if there is any other information that needs to be provided. thanks!\n\n### What jax/jaxlib version are you using?\n\njax v0.3.15\n\n### Which accelerator(s) are you using?\n\nCPU\n\n### Additional system info\n\nPython 3.9.13, pip 22.3.1, Windows 10, \n\n### NVIDIA GPU info\n\n_No response_\n", "before_files": [{"content": "# Copyright 2018 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom distutils import spawn\nimport subprocess\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n_current_jaxlib_version = '0.4.1'\n# The following should be updated with each new jaxlib release.\n_latest_jaxlib_version_on_pypi = '0.4.1'\n_available_cuda_versions = ['11']\n_default_cuda_version = '11'\n_available_cudnn_versions = ['82', '86']\n_default_cudnn_version = '86'\n_libtpu_version = '0.1.dev20221212'\n\n_dct = {}\nwith open('jax/version.py') as f:\n exec(f.read(), _dct)\n__version__ = _dct['__version__']\n_minimum_jaxlib_version = _dct['_minimum_jaxlib_version']\n\nwith open('README.md') as f:\n _long_description = f.read()\n\nif 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):\n protoc = os.environ['PROTOC']\nelse:\n protoc = spawn.find_executable('protoc')\n\ndef generate_proto(source):\n if not protoc or not os.path.exists(source):\n return\n protoc_command = [protoc, '-I.', '--python_out=.', source]\n if subprocess.call(protoc_command) != 0:\n sys.exit(-1)\n\ngenerate_proto(\"jax/experimental/australis/executable.proto\")\ngenerate_proto(\"jax/experimental/australis/petri.proto\")\n\nsetup(\n name='jax',\n version=__version__,\n description='Differentiate, compile, and transform Numpy code.',\n long_description=_long_description,\n long_description_content_type='text/markdown',\n author='JAX team',\n author_email='[email protected]',\n packages=find_packages(exclude=[\"examples\"]),\n package_data={'jax': ['py.typed', \"*.pyi\", \"**/*.pyi\"]},\n python_requires='>=3.8',\n install_requires=[\n 'numpy>=1.20',\n 'opt_einsum',\n 'scipy>=1.5',\n ],\n extras_require={\n # Minimum jaxlib version; used in testing.\n 'minimum-jaxlib': [f'jaxlib=={_minimum_jaxlib_version}'],\n\n # CPU-only jaxlib can be installed via:\n # $ pip install jax[cpu]\n 'cpu': [f'jaxlib=={_current_jaxlib_version}'],\n\n # Used only for CI builds that install JAX from github HEAD.\n 'ci': [f'jaxlib=={_latest_jaxlib_version_on_pypi}'],\n\n # Cloud TPU VM jaxlib can be installed via:\n # $ pip install jax[tpu] -f 
https://storage.googleapis.com/jax-releases/libtpu_releases.html\n 'tpu': [f'jaxlib=={_current_jaxlib_version}',\n f'libtpu-nightly=={_libtpu_version}',\n # Required by cloud_tpu_init.py\n 'requests'],\n\n # $ pip install jax[australis]\n 'australis': ['protobuf>=3.13,<4'],\n\n # CUDA installations require adding jax releases URL; e.g.\n # Cuda installation defaulting to a CUDA and Cudnn version defined above.\n # $ pip install jax[cuda] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html\n 'cuda': [f\"jaxlib=={_current_jaxlib_version}+cuda{_default_cuda_version}.cudnn{_default_cudnn_version}\"],\n\n # CUDA installations require adding jax releases URL; e.g.\n # $ pip install jax[cuda11_cudnn82] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html\n # $ pip install jax[cuda11_cudnn86] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html\n **{f'cuda{cuda_version}_cudnn{cudnn_version}': f\"jaxlib=={_current_jaxlib_version}+cuda{cuda_version}.cudnn{cudnn_version}\"\n for cuda_version in _available_cuda_versions for cudnn_version in _available_cudnn_versions}\n },\n url='https://github.com/google/jax',\n license='Apache-2.0',\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 3,630 | 177 |
gh_patches_debug_17518 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-1644 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[vec][search] update to paddlespeech model
</issue>
<code>
[start of demos/audio_searching/src/encode.py]
1 # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import numpy as np
15 from logs import LOGGER
16
17 from paddlespeech.cli import VectorExecutor
18
19 vector_executor = VectorExecutor()
20
21
22 def get_audio_embedding(path):
23 """
24 Use vpr_inference to generate embedding of audio
25 """
26 try:
27 embedding = vector_executor(audio_file=path)
28 embedding = embedding / np.linalg.norm(embedding)
29 embedding = embedding.tolist()
30 return embedding
31 except Exception as e:
32 LOGGER.error(f"Error with embedding:{e}")
33 return None
34
[end of demos/audio_searching/src/encode.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/demos/audio_searching/src/encode.py b/demos/audio_searching/src/encode.py
--- a/demos/audio_searching/src/encode.py
+++ b/demos/audio_searching/src/encode.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
-from logs import LOGGER
+from logs import LOGGER
from paddlespeech.cli import VectorExecutor
vector_executor = VectorExecutor()
@@ -24,7 +24,8 @@
Use vpr_inference to generate embedding of audio
"""
try:
- embedding = vector_executor(audio_file=path)
+ embedding = vector_executor(
+ audio_file=path, model='ecapatdnn_voxceleb12')
embedding = embedding / np.linalg.norm(embedding)
embedding = embedding.tolist()
return embedding
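A usage sketch of the updated call (the model name comes from the diff; the audio path is a placeholder):

```python
from paddlespeech.cli import VectorExecutor

vector_executor = VectorExecutor()
# Pinning the model keeps embeddings consistent across paddlespeech releases.
embedding = vector_executor(audio_file="path/to/audio.wav",
                            model="ecapatdnn_voxceleb12")
```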
| {"golden_diff": "diff --git a/demos/audio_searching/src/encode.py b/demos/audio_searching/src/encode.py\n--- a/demos/audio_searching/src/encode.py\n+++ b/demos/audio_searching/src/encode.py\n@@ -12,8 +12,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n import numpy as np\n-from logs import LOGGER\n \n+from logs import LOGGER\n from paddlespeech.cli import VectorExecutor\n \n vector_executor = VectorExecutor()\n@@ -24,7 +24,8 @@\n Use vpr_inference to generate embedding of audio\n \"\"\"\n try:\n- embedding = vector_executor(audio_file=path)\n+ embedding = vector_executor(\n+ audio_file=path, model='ecapatdnn_voxceleb12')\n embedding = embedding / np.linalg.norm(embedding)\n embedding = embedding.tolist()\n return embedding\n", "issue": "[vec][search] update to paddlespeech model\n\n", "before_files": [{"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nfrom logs import LOGGER\n\nfrom paddlespeech.cli import VectorExecutor\n\nvector_executor = VectorExecutor()\n\n\ndef get_audio_embedding(path):\n \"\"\"\n Use vpr_inference to generate embedding of audio\n \"\"\"\n try:\n embedding = vector_executor(audio_file=path)\n embedding = embedding / np.linalg.norm(embedding)\n embedding = embedding.tolist()\n return embedding\n except Exception as e:\n LOGGER.error(f\"Error with embedding:{e}\")\n return None\n", "path": "demos/audio_searching/src/encode.py"}]} | 857 | 195 |
gh_patches_debug_21048 | rasdani/github-patches | git_diff | frappe__frappe-25856 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add jitter on scheduled jobs
Hourly and daily long-running processes, if all started at once, can cause a sudden increase in workload when you have many sites/benches.
Adding simple jitter to the scheduled time can lessen the impact of such issues. Jitter is a common pattern for cases where "frequency" itself becomes the problem: e.g. gunicorn adds jitter to avoid restarting all workers at the same time, profilers add jitter to avoid amplifying some pattern of repeated work, and retry/backoff implementations use it to avoid creating such patterns.
Possible implementation: when importing scheduled job types, add some random delay to the cron schedule, e.g. daily jobs would start in the range of 12:00-12:15 AM instead of all starting at 12:00 AM (see the sketch below).
Cons: some jobs are required to be executed at specific times, e.g. birthday reminders. Adding a negative offset can therefore introduce bugs for them; a positive offset, however, should be fine AFAIK.
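A rough sketch of that suggestion (illustrative only; the helper name is made up, and the merged change further below ends up offsetting the computed next execution instead of rewriting the cron string):

```python
from random import randint

def daily_cron_with_jitter():
    # "0 0 * * *" becomes e.g. "7 0 * * *": daily jobs spread over 12:00-12:15 AM.
    return f"{randint(0, 15)} 0 * * *"
```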
</issue>
<code>
[start of frappe/core/doctype/scheduled_job_type/scheduled_job_type.py]
1 # Copyright (c) 2021, Frappe Technologies and contributors
2 # License: MIT. See LICENSE
3
4 import json
5 from datetime import datetime
6
7 import click
8 from croniter import CroniterBadCronError, croniter
9
10 import frappe
11 from frappe import _
12 from frappe.model.document import Document
13 from frappe.utils import get_datetime, now_datetime
14 from frappe.utils.background_jobs import enqueue, is_job_enqueued
15
16
17 class ScheduledJobType(Document):
18 def autoname(self):
19 self.name = ".".join(self.method.split(".")[-2:])
20
21 def validate(self):
22 if self.frequency != "All":
23 # force logging for all events other than continuous ones (ALL)
24 self.create_log = 1
25
26 if self.frequency == "Cron":
27 if not self.cron_format:
28 frappe.throw(_("Cron format is required for job types with Cron frequency."))
29 try:
30 croniter(self.cron_format)
31 except CroniterBadCronError:
32 frappe.throw(
33 _("{0} is not a valid Cron expression.").format(f"<code>{self.cron_format}</code>"),
34 title=_("Bad Cron Expression"),
35 )
36
37 def enqueue(self, force=False) -> bool:
38 # enqueue event if last execution is done
39 if self.is_event_due() or force:
40 if frappe.flags.enqueued_jobs:
41 frappe.flags.enqueued_jobs.append(self.method)
42
43 if frappe.flags.execute_job:
44 self.execute()
45 else:
46 if not self.is_job_in_queue():
47 enqueue(
48 "frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job",
49 queue=self.get_queue_name(),
50 job_type=self.method,
51 job_id=self.rq_job_id,
52 )
53 return True
54 else:
55 frappe.logger("scheduler").error(
56 f"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}"
57 )
58 return False
59
60 def is_event_due(self, current_time=None):
61 """Return true if event is due based on time lapsed since last execution"""
62 # if the next scheduled event is before NOW, then its due!
63 return self.get_next_execution() <= (current_time or now_datetime())
64
65 def is_job_in_queue(self) -> bool:
66 return is_job_enqueued(self.rq_job_id)
67
68 @property
69 def rq_job_id(self):
70 """Unique ID created to deduplicate jobs with single RQ call."""
71 return f"scheduled_job::{self.method}"
72
73 @property
74 def next_execution(self):
75 return self.get_next_execution()
76
77 def get_next_execution(self):
78 CRON_MAP = {
79 "Yearly": "0 0 1 1 *",
80 "Annual": "0 0 1 1 *",
81 "Monthly": "0 0 1 * *",
82 "Monthly Long": "0 0 1 * *",
83 "Weekly": "0 0 * * 0",
84 "Weekly Long": "0 0 * * 0",
85 "Daily": "0 0 * * *",
86 "Daily Long": "0 0 * * *",
87 "Hourly": "0 * * * *",
88 "Hourly Long": "0 * * * *",
89 "All": "0/" + str((frappe.get_conf().scheduler_interval or 240) // 60) + " * * * *",
90 }
91
92 if not self.cron_format:
93 self.cron_format = CRON_MAP[self.frequency]
94
95 # If this is a cold start then last_execution will not be set.
96 # Creation is set as fallback because if very old fallback is set job might trigger
97 # immediately, even when it's meant to be daily.
98 # A dynamic fallback like current time might miss the scheduler interval and job will never start.
99 last_execution = get_datetime(self.last_execution or self.creation)
100 return croniter(self.cron_format, last_execution).get_next(datetime)
101
102 def execute(self):
103 self.scheduler_log = None
104 try:
105 self.log_status("Start")
106 if self.server_script:
107 script_name = frappe.db.get_value("Server Script", self.server_script)
108 if script_name:
109 frappe.get_doc("Server Script", script_name).execute_scheduled_method()
110 else:
111 frappe.get_attr(self.method)()
112 frappe.db.commit()
113 self.log_status("Complete")
114 except Exception:
115 frappe.db.rollback()
116 self.log_status("Failed")
117
118 def log_status(self, status):
119 # log file
120 frappe.logger("scheduler").info(f"Scheduled Job {status}: {self.method} for {frappe.local.site}")
121 self.update_scheduler_log(status)
122
123 def update_scheduler_log(self, status):
124 if not self.create_log:
125 # self.get_next_execution will work properly iff self.last_execution is properly set
126 if self.frequency == "All" and status == "Start":
127 self.db_set("last_execution", now_datetime(), update_modified=False)
128 frappe.db.commit()
129 return
130 if not self.scheduler_log:
131 self.scheduler_log = frappe.get_doc(
132 dict(doctype="Scheduled Job Log", scheduled_job_type=self.name)
133 ).insert(ignore_permissions=True)
134 self.scheduler_log.db_set("status", status)
135 if status == "Failed":
136 self.scheduler_log.db_set("details", frappe.get_traceback())
137 if status == "Start":
138 self.db_set("last_execution", now_datetime(), update_modified=False)
139 frappe.db.commit()
140
141 def get_queue_name(self):
142 return "long" if ("Long" in self.frequency) else "default"
143
144 def on_trash(self):
145 frappe.db.delete("Scheduled Job Log", {"scheduled_job_type": self.name})
146
147
148 @frappe.whitelist()
149 def execute_event(doc: str):
150 frappe.only_for("System Manager")
151 doc = json.loads(doc)
152 frappe.get_doc("Scheduled Job Type", doc.get("name")).enqueue(force=True)
153 return doc
154
155
156 def run_scheduled_job(job_type: str):
157 """This is a wrapper function that runs a hooks.scheduler_events method"""
158 try:
159 frappe.get_doc("Scheduled Job Type", dict(method=job_type)).execute()
160 except Exception:
161 print(frappe.get_traceback())
162
163
164 def sync_jobs(hooks: dict | None = None):
165 frappe.reload_doc("core", "doctype", "scheduled_job_type")
166 scheduler_events = hooks or frappe.get_hooks("scheduler_events")
167 all_events = insert_events(scheduler_events)
168 clear_events(all_events)
169
170
171 def insert_events(scheduler_events: dict) -> list:
172 cron_jobs, event_jobs = [], []
173 for event_type in scheduler_events:
174 events = scheduler_events.get(event_type)
175 if isinstance(events, dict):
176 cron_jobs += insert_cron_jobs(events)
177 else:
178 # hourly, daily etc
179 event_jobs += insert_event_jobs(events, event_type)
180 return cron_jobs + event_jobs
181
182
183 def insert_cron_jobs(events: dict) -> list:
184 cron_jobs = []
185 for cron_format in events:
186 for event in events.get(cron_format):
187 cron_jobs.append(event)
188 insert_single_event("Cron", event, cron_format)
189 return cron_jobs
190
191
192 def insert_event_jobs(events: list, event_type: str) -> list:
193 event_jobs = []
194 for event in events:
195 event_jobs.append(event)
196 frequency = event_type.replace("_", " ").title()
197 insert_single_event(frequency, event)
198 return event_jobs
199
200
201 def insert_single_event(frequency: str, event: str, cron_format: str | None = None):
202 cron_expr = {"cron_format": cron_format} if cron_format else {}
203
204 try:
205 frappe.get_attr(event)
206 except Exception as e:
207 click.secho(f"{event} is not a valid method: {e}", fg="yellow")
208
209 doc = frappe.get_doc(
210 {
211 "doctype": "Scheduled Job Type",
212 "method": event,
213 "cron_format": cron_format,
214 "frequency": frequency,
215 }
216 )
217
218 if not frappe.db.exists("Scheduled Job Type", {"method": event, "frequency": frequency, **cron_expr}):
219 savepoint = "scheduled_job_type_creation"
220 try:
221 frappe.db.savepoint(savepoint)
222 doc.insert()
223 except frappe.DuplicateEntryError:
224 frappe.db.rollback(save_point=savepoint)
225 doc.delete()
226 doc.insert()
227
228
229 def clear_events(all_events: list):
230 for event in frappe.get_all("Scheduled Job Type", fields=["name", "method", "server_script"]):
231 is_server_script = event.server_script
232 is_defined_in_hooks = event.method in all_events
233
234 if not (is_defined_in_hooks or is_server_script):
235 frappe.delete_doc("Scheduled Job Type", event.name)
236
[end of frappe/core/doctype/scheduled_job_type/scheduled_job_type.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py
--- a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py
+++ b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py
@@ -2,7 +2,8 @@
# License: MIT. See LICENSE
import json
-from datetime import datetime
+from datetime import datetime, timedelta
+from random import randint
import click
from croniter import CroniterBadCronError, croniter
@@ -97,7 +98,12 @@
# immediately, even when it's meant to be daily.
# A dynamic fallback like current time might miss the scheduler interval and job will never start.
last_execution = get_datetime(self.last_execution or self.creation)
- return croniter(self.cron_format, last_execution).get_next(datetime)
+ next_execution = croniter(self.cron_format, last_execution).get_next(datetime)
+
+ jitter = 0
+ if self.frequency in ("Hourly Long", "Daily Long"):
+ jitter = randint(1, 600)
+ return next_execution + timedelta(seconds=jitter)
def execute(self):
self.scheduler_log = None
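In effect, the patch keeps exact schedules for time-sensitive jobs and only delays the long-queue variants by up to ten minutes. The added behaviour, condensed into a standalone sketch:

```python
from datetime import timedelta
from random import randint

def apply_jitter(next_execution, frequency):
    # Only the "* Long" frequencies are delayed (1-600 s); everything else,
    # e.g. birthday reminders, keeps its exact scheduled time.
    if frequency in ("Hourly Long", "Daily Long"):
        return next_execution + timedelta(seconds=randint(1, 600))
    return next_execution
```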
| {"golden_diff": "diff --git a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n--- a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n+++ b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n@@ -2,7 +2,8 @@\n # License: MIT. See LICENSE\n \n import json\n-from datetime import datetime\n+from datetime import datetime, timedelta\n+from random import randint\n \n import click\n from croniter import CroniterBadCronError, croniter\n@@ -97,7 +98,12 @@\n \t\t# immediately, even when it's meant to be daily.\n \t\t# A dynamic fallback like current time might miss the scheduler interval and job will never start.\n \t\tlast_execution = get_datetime(self.last_execution or self.creation)\n-\t\treturn croniter(self.cron_format, last_execution).get_next(datetime)\n+\t\tnext_execution = croniter(self.cron_format, last_execution).get_next(datetime)\n+\n+\t\tjitter = 0\n+\t\tif self.frequency in (\"Hourly Long\", \"Daily Long\"):\n+\t\t\tjitter = randint(1, 600)\n+\t\treturn next_execution + timedelta(seconds=jitter)\n \n \tdef execute(self):\n \t\tself.scheduler_log = None\n", "issue": "Add jitter on scheduled jobs\nhourly, daily long processes if all started at once can cause sudden increase in workload if you have many sites/benches. \r\n\r\n\r\nAdding simple jitter to scheduled time can lessen the impact of such issues. Jitter is common pattern used for solving problems with \"frequency\" becomes a problem. E.g. gunicorn adds jitter to avoid restarting all workers at same time, profilers add jitter to avoid amplifying some pattern of repeated work. retry/backoff implementations also use to avoid creating patterns.\r\n\r\n\r\nPossible implementation: When importing scheduled job types add some random delays in cron. E.g. daily jobs will start in the range of 12:00-12:15 AM instead of all starting at 12:00 AM.\r\n\r\n\r\nCons: Some jobs are required to be executed at specific times e.g. birthday reminders. So adding negative offset can introduce bugs for them, positive offset however should be fine AFAIK. \n", "before_files": [{"content": "# Copyright (c) 2021, Frappe Technologies and contributors\n# License: MIT. 
See LICENSE\n\nimport json\nfrom datetime import datetime\n\nimport click\nfrom croniter import CroniterBadCronError, croniter\n\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.utils import get_datetime, now_datetime\nfrom frappe.utils.background_jobs import enqueue, is_job_enqueued\n\n\nclass ScheduledJobType(Document):\n\tdef autoname(self):\n\t\tself.name = \".\".join(self.method.split(\".\")[-2:])\n\n\tdef validate(self):\n\t\tif self.frequency != \"All\":\n\t\t\t# force logging for all events other than continuous ones (ALL)\n\t\t\tself.create_log = 1\n\n\t\tif self.frequency == \"Cron\":\n\t\t\tif not self.cron_format:\n\t\t\t\tfrappe.throw(_(\"Cron format is required for job types with Cron frequency.\"))\n\t\t\ttry:\n\t\t\t\tcroniter(self.cron_format)\n\t\t\texcept CroniterBadCronError:\n\t\t\t\tfrappe.throw(\n\t\t\t\t\t_(\"{0} is not a valid Cron expression.\").format(f\"<code>{self.cron_format}</code>\"),\n\t\t\t\t\ttitle=_(\"Bad Cron Expression\"),\n\t\t\t\t)\n\n\tdef enqueue(self, force=False) -> bool:\n\t\t# enqueue event if last execution is done\n\t\tif self.is_event_due() or force:\n\t\t\tif frappe.flags.enqueued_jobs:\n\t\t\t\tfrappe.flags.enqueued_jobs.append(self.method)\n\n\t\t\tif frappe.flags.execute_job:\n\t\t\t\tself.execute()\n\t\t\telse:\n\t\t\t\tif not self.is_job_in_queue():\n\t\t\t\t\tenqueue(\n\t\t\t\t\t\t\"frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job\",\n\t\t\t\t\t\tqueue=self.get_queue_name(),\n\t\t\t\t\t\tjob_type=self.method,\n\t\t\t\t\t\tjob_id=self.rq_job_id,\n\t\t\t\t\t)\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tfrappe.logger(\"scheduler\").error(\n\t\t\t\t\t\tf\"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}\"\n\t\t\t\t\t)\n\t\treturn False\n\n\tdef is_event_due(self, current_time=None):\n\t\t\"\"\"Return true if event is due based on time lapsed since last execution\"\"\"\n\t\t# if the next scheduled event is before NOW, then its due!\n\t\treturn self.get_next_execution() <= (current_time or now_datetime())\n\n\tdef is_job_in_queue(self) -> bool:\n\t\treturn is_job_enqueued(self.rq_job_id)\n\n\t@property\n\tdef rq_job_id(self):\n\t\t\"\"\"Unique ID created to deduplicate jobs with single RQ call.\"\"\"\n\t\treturn f\"scheduled_job::{self.method}\"\n\n\t@property\n\tdef next_execution(self):\n\t\treturn self.get_next_execution()\n\n\tdef get_next_execution(self):\n\t\tCRON_MAP = {\n\t\t\t\"Yearly\": \"0 0 1 1 *\",\n\t\t\t\"Annual\": \"0 0 1 1 *\",\n\t\t\t\"Monthly\": \"0 0 1 * *\",\n\t\t\t\"Monthly Long\": \"0 0 1 * *\",\n\t\t\t\"Weekly\": \"0 0 * * 0\",\n\t\t\t\"Weekly Long\": \"0 0 * * 0\",\n\t\t\t\"Daily\": \"0 0 * * *\",\n\t\t\t\"Daily Long\": \"0 0 * * *\",\n\t\t\t\"Hourly\": \"0 * * * *\",\n\t\t\t\"Hourly Long\": \"0 * * * *\",\n\t\t\t\"All\": \"0/\" + str((frappe.get_conf().scheduler_interval or 240) // 60) + \" * * * *\",\n\t\t}\n\n\t\tif not self.cron_format:\n\t\t\tself.cron_format = CRON_MAP[self.frequency]\n\n\t\t# If this is a cold start then last_execution will not be set.\n\t\t# Creation is set as fallback because if very old fallback is set job might trigger\n\t\t# immediately, even when it's meant to be daily.\n\t\t# A dynamic fallback like current time might miss the scheduler interval and job will never start.\n\t\tlast_execution = get_datetime(self.last_execution or self.creation)\n\t\treturn croniter(self.cron_format, last_execution).get_next(datetime)\n\n\tdef execute(self):\n\t\tself.scheduler_log = 
None\n\t\ttry:\n\t\t\tself.log_status(\"Start\")\n\t\t\tif self.server_script:\n\t\t\t\tscript_name = frappe.db.get_value(\"Server Script\", self.server_script)\n\t\t\t\tif script_name:\n\t\t\t\t\tfrappe.get_doc(\"Server Script\", script_name).execute_scheduled_method()\n\t\t\telse:\n\t\t\t\tfrappe.get_attr(self.method)()\n\t\t\tfrappe.db.commit()\n\t\t\tself.log_status(\"Complete\")\n\t\texcept Exception:\n\t\t\tfrappe.db.rollback()\n\t\t\tself.log_status(\"Failed\")\n\n\tdef log_status(self, status):\n\t\t# log file\n\t\tfrappe.logger(\"scheduler\").info(f\"Scheduled Job {status}: {self.method} for {frappe.local.site}\")\n\t\tself.update_scheduler_log(status)\n\n\tdef update_scheduler_log(self, status):\n\t\tif not self.create_log:\n\t\t\t# self.get_next_execution will work properly iff self.last_execution is properly set\n\t\t\tif self.frequency == \"All\" and status == \"Start\":\n\t\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\t\t\tfrappe.db.commit()\n\t\t\treturn\n\t\tif not self.scheduler_log:\n\t\t\tself.scheduler_log = frappe.get_doc(\n\t\t\t\tdict(doctype=\"Scheduled Job Log\", scheduled_job_type=self.name)\n\t\t\t).insert(ignore_permissions=True)\n\t\tself.scheduler_log.db_set(\"status\", status)\n\t\tif status == \"Failed\":\n\t\t\tself.scheduler_log.db_set(\"details\", frappe.get_traceback())\n\t\tif status == \"Start\":\n\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\tfrappe.db.commit()\n\n\tdef get_queue_name(self):\n\t\treturn \"long\" if (\"Long\" in self.frequency) else \"default\"\n\n\tdef on_trash(self):\n\t\tfrappe.db.delete(\"Scheduled Job Log\", {\"scheduled_job_type\": self.name})\n\n\[email protected]()\ndef execute_event(doc: str):\n\tfrappe.only_for(\"System Manager\")\n\tdoc = json.loads(doc)\n\tfrappe.get_doc(\"Scheduled Job Type\", doc.get(\"name\")).enqueue(force=True)\n\treturn doc\n\n\ndef run_scheduled_job(job_type: str):\n\t\"\"\"This is a wrapper function that runs a hooks.scheduler_events method\"\"\"\n\ttry:\n\t\tfrappe.get_doc(\"Scheduled Job Type\", dict(method=job_type)).execute()\n\texcept Exception:\n\t\tprint(frappe.get_traceback())\n\n\ndef sync_jobs(hooks: dict | None = None):\n\tfrappe.reload_doc(\"core\", \"doctype\", \"scheduled_job_type\")\n\tscheduler_events = hooks or frappe.get_hooks(\"scheduler_events\")\n\tall_events = insert_events(scheduler_events)\n\tclear_events(all_events)\n\n\ndef insert_events(scheduler_events: dict) -> list:\n\tcron_jobs, event_jobs = [], []\n\tfor event_type in scheduler_events:\n\t\tevents = scheduler_events.get(event_type)\n\t\tif isinstance(events, dict):\n\t\t\tcron_jobs += insert_cron_jobs(events)\n\t\telse:\n\t\t\t# hourly, daily etc\n\t\t\tevent_jobs += insert_event_jobs(events, event_type)\n\treturn cron_jobs + event_jobs\n\n\ndef insert_cron_jobs(events: dict) -> list:\n\tcron_jobs = []\n\tfor cron_format in events:\n\t\tfor event in events.get(cron_format):\n\t\t\tcron_jobs.append(event)\n\t\t\tinsert_single_event(\"Cron\", event, cron_format)\n\treturn cron_jobs\n\n\ndef insert_event_jobs(events: list, event_type: str) -> list:\n\tevent_jobs = []\n\tfor event in events:\n\t\tevent_jobs.append(event)\n\t\tfrequency = event_type.replace(\"_\", \" \").title()\n\t\tinsert_single_event(frequency, event)\n\treturn event_jobs\n\n\ndef insert_single_event(frequency: str, event: str, cron_format: str | None = None):\n\tcron_expr = {\"cron_format\": cron_format} if cron_format else {}\n\n\ttry:\n\t\tfrappe.get_attr(event)\n\texcept Exception 
as e:\n\t\tclick.secho(f\"{event} is not a valid method: {e}\", fg=\"yellow\")\n\n\tdoc = frappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": \"Scheduled Job Type\",\n\t\t\t\"method\": event,\n\t\t\t\"cron_format\": cron_format,\n\t\t\t\"frequency\": frequency,\n\t\t}\n\t)\n\n\tif not frappe.db.exists(\"Scheduled Job Type\", {\"method\": event, \"frequency\": frequency, **cron_expr}):\n\t\tsavepoint = \"scheduled_job_type_creation\"\n\t\ttry:\n\t\t\tfrappe.db.savepoint(savepoint)\n\t\t\tdoc.insert()\n\t\texcept frappe.DuplicateEntryError:\n\t\t\tfrappe.db.rollback(save_point=savepoint)\n\t\t\tdoc.delete()\n\t\t\tdoc.insert()\n\n\ndef clear_events(all_events: list):\n\tfor event in frappe.get_all(\"Scheduled Job Type\", fields=[\"name\", \"method\", \"server_script\"]):\n\t\tis_server_script = event.server_script\n\t\tis_defined_in_hooks = event.method in all_events\n\n\t\tif not (is_defined_in_hooks or is_server_script):\n\t\t\tfrappe.delete_doc(\"Scheduled Job Type\", event.name)\n", "path": "frappe/core/doctype/scheduled_job_type/scheduled_job_type.py"}]} | 3,336 | 283 |
gh_patches_debug_7068 | rasdani/github-patches | git_diff | chainer__chainer-6489 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ChainerMN's ImageNet example is broken because of update in pickle module
* Conditions
<!-- If you're using Chainer 4.0+, you can also get this information by typing `python -c 'import chainer; chainer.print_runtime_info()'. -->
- Chainer >= 5.0.0
* Code to reproduce
The ImageNet example on Python >= 3.6
```
import multiprocessing
p = multiprocessing.Process(target=lambda *x: x, args=())
p.start()
```
* Error messages, stack traces, or logs
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/process.py", line 105, in start
self._popen = self._Popen(self)
File "/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/context.py", line 291, in _Popen
return Popen(process_obj)
File "/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/popen_forkserver.py", line 35, in __init__
super().__init__(process_obj)
File "/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/popen_fork.py", line 20, in __init__
self._launch(process_obj)
File "/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/popen_forkserver.py", line 47, in _launch
reduction.dump(process_obj, buf)
File "/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <function <lambda> at 0x7fb633f35e18>: attribute lookup <lambda> on __main__ failed
```
For details, see https://docs.python.org/3.6/library/pickle.html
```
The following types can be pickled:
* None, True, and False
* integers, floating point numbers, complex numbers
* strings, bytes, bytearrays
* tuples, lists, sets, and dictionaries containing only picklable objects
* functions defined at the top level of a module (using def, not lambda)
* built-in functions defined at the top level of a module
* classes that are defined at the top level of a module
* instances of such classes whose __dict__ or the result of calling __getstate__() is picklable (see section Pickling Class Instances for details).
```
Thus a lambda is not picklable.
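A minimal sketch of a picklable alternative (the helper name `_noop` is an illustration, not code from the example):
```
import multiprocessing


def _noop(*args):
    # Module-level functions are picklable, unlike lambdas, so this works
    # as a Process target under the forkserver start method.
    return args


if __name__ == '__main__':
    multiprocessing.set_start_method('forkserver')
    p = multiprocessing.Process(target=_noop, args=())
    p.start()
    p.join()
```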
</issue>
<code>
[start of examples/chainermn/imagenet/train_imagenet.py]
1 #!/usr/bin/env python
2
3 from __future__ import print_function
4 import argparse
5 import multiprocessing
6 import random
7 import sys
8
9 import numpy as np
10
11 import chainer
12 import chainer.cuda
13 from chainer import training
14 from chainer.training import extensions
15
16 import chainermn
17
18
19 import models.alex as alex
20 import models.googlenet as googlenet
21 import models.googlenetbn as googlenetbn
22 import models.nin as nin
23 import models.resnet50 as resnet50
24
25 # Check Python version if it supports multiprocessing.set_start_method,
26 # which was introduced in Python 3.4
27 major, minor, _, _, _ = sys.version_info
28 if major <= 2 or (major == 3 and minor < 4):
29 sys.stderr.write('Error: ImageNet example uses '
30 'chainer.iterators.MultiprocessIterator, '
31 'which works only with Python >= 3.4. \n'
32 'For more details, see '
33 'http://chainermn.readthedocs.io/en/master/'
34 'tutorial/tips_faqs.html#using-multiprocessiterator\n')
35 exit(-1)
36
37
38 class PreprocessedDataset(chainer.dataset.DatasetMixin):
39
40 def __init__(self, path, root, mean, crop_size, random=True):
41 self.base = chainer.datasets.LabeledImageDataset(path, root)
42 self.mean = mean.astype(np.float32)
43 self.crop_size = crop_size
44 self.random = random
45
46 def __len__(self):
47 return len(self.base)
48
49 def get_example(self, i):
50 # It reads the i-th image/label pair and return a preprocessed image.
51 # It applies following preprocesses:
52 # - Cropping (random or center rectangular)
53 # - Random flip
54 # - Scaling to [0, 1] value
55 crop_size = self.crop_size
56
57 image, label = self.base[i]
58 _, h, w = image.shape
59
60 if self.random:
61 # Randomly crop a region and flip the image
62 top = random.randint(0, h - crop_size - 1)
63 left = random.randint(0, w - crop_size - 1)
64 if random.randint(0, 1):
65 image = image[:, :, ::-1]
66 else:
67 # Crop the center
68 top = (h - crop_size) // 2
69 left = (w - crop_size) // 2
70 bottom = top + crop_size
71 right = left + crop_size
72
73 image = image[:, top:bottom, left:right]
74 image -= self.mean[:, top:bottom, left:right]
75 image *= (1.0 / 255.0) # Scale to [0, 1]
76 return image, label
77
78
79 # chainermn.create_multi_node_evaluator can be also used with user customized
80 # evaluator classes that inherit chainer.training.extensions.Evaluator.
81 class TestModeEvaluator(extensions.Evaluator):
82
83 def evaluate(self):
84 model = self.get_target('main')
85 model.train = False
86 ret = super(TestModeEvaluator, self).evaluate()
87 model.train = True
88 return ret
89
90
91 def main():
92 # Check if GPU is available
93 # (ImageNet example does not support CPU execution)
94 if not chainer.cuda.available:
95 raise RuntimeError('ImageNet requires GPU support.')
96
97 archs = {
98 'alex': alex.Alex,
99 'googlenet': googlenet.GoogLeNet,
100 'googlenetbn': googlenetbn.GoogLeNetBN,
101 'nin': nin.NIN,
102 'resnet50': resnet50.ResNet50,
103 }
104
105 parser = argparse.ArgumentParser(
106 description='Learning convnet from ILSVRC2012 dataset')
107 parser.add_argument('train', help='Path to training image-label list file')
108 parser.add_argument('val', help='Path to validation image-label list file')
109 parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin',
110 help='Convnet architecture')
111 parser.add_argument('--batchsize', '-B', type=int, default=32,
112 help='Learning minibatch size')
113 parser.add_argument('--epoch', '-E', type=int, default=10,
114 help='Number of epochs to train')
115 parser.add_argument('--initmodel',
116 help='Initialize the model from given file')
117 parser.add_argument('--loaderjob', '-j', type=int,
118 help='Number of parallel data loading processes')
119 parser.add_argument('--mean', '-m', default='mean.npy',
120 help='Mean file (computed by compute_mean.py)')
121 parser.add_argument('--resume', '-r', default='',
122 help='Initialize the trainer from given file')
123 parser.add_argument('--out', '-o', default='result',
124 help='Output directory')
125 parser.add_argument('--root', '-R', default='.',
126 help='Root directory path of image files')
127 parser.add_argument('--val_batchsize', '-b', type=int, default=250,
128 help='Validation minibatch size')
129 parser.add_argument('--test', action='store_true')
130 parser.add_argument('--communicator', default='hierarchical')
131 parser.set_defaults(test=False)
132 args = parser.parse_args()
133
134 # Start method of multiprocessing module need to be changed if we
135 # are using InfiniBand and MultiprocessIterator. This is because
136 # processes often crash when calling fork if they are using
137 # Infiniband. (c.f.,
138 # https://www.open-mpi.org/faq/?category=tuning#fork-warning )
139 # Also, just setting the start method does not seem to be
140 # sufficient to actually launch the forkserver processes, so also
141 # start a dummy process.
142 # See also our document:
143 # https://chainermn.readthedocs.io/en/stable/tutorial/tips_faqs.html#using-multiprocessiterator
144 # This must be done *before* ``chainermn.create_communicator``!!!
145 multiprocessing.set_start_method('forkserver')
146 p = multiprocessing.Process(target=lambda *x: x, args=())
147 p.start()
148 p.join()
149
150 # Prepare ChainerMN communicator.
151 comm = chainermn.create_communicator(args.communicator)
152 device = comm.intra_rank
153
154 if comm.rank == 0:
155 print('==========================================')
156 print('Num process (COMM_WORLD): {}'.format(comm.size))
157 print('Using {} communicator'.format(args.communicator))
158 print('Using {} arch'.format(args.arch))
159 print('Num Minibatch-size: {}'.format(args.batchsize))
160 print('Num epoch: {}'.format(args.epoch))
161 print('==========================================')
162
163 model = archs[args.arch]()
164 if args.initmodel:
165 print('Load model from', args.initmodel)
166 chainer.serializers.load_npz(args.initmodel, model)
167
168 chainer.cuda.get_device_from_id(device).use() # Make the GPU current
169 model.to_gpu()
170
171 # Split and distribute the dataset. Only worker 0 loads the whole dataset.
172 # Datasets of worker 0 are evenly split and distributed to all workers.
173 mean = np.load(args.mean)
174 if comm.rank == 0:
175 train = PreprocessedDataset(args.train, args.root, mean, model.insize)
176 val = PreprocessedDataset(
177 args.val, args.root, mean, model.insize, False)
178 else:
179 train = None
180 val = None
181 train = chainermn.scatter_dataset(train, comm, shuffle=True)
182 val = chainermn.scatter_dataset(val, comm)
183
184 # A workaround for processes crash should be done before making
185 # communicator above, when using fork (e.g. MultiProcessIterator)
186 # along with Infiniband.
187 train_iter = chainer.iterators.MultiprocessIterator(
188 train, args.batchsize, n_processes=args.loaderjob)
189 val_iter = chainer.iterators.MultiprocessIterator(
190 val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)
191
192 # Create a multi node optimizer from a standard Chainer optimizer.
193 optimizer = chainermn.create_multi_node_optimizer(
194 chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9), comm)
195 optimizer.setup(model)
196
197 # Set up a trainer
198 updater = training.StandardUpdater(train_iter, optimizer, device=device)
199 trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)
200
201 checkpoint_interval = (10, 'iteration') if args.test else (1, 'epoch')
202 val_interval = (10, 'iteration') if args.test else (1, 'epoch')
203 log_interval = (10, 'iteration') if args.test else (1, 'epoch')
204
205 checkpointer = chainermn.create_multi_node_checkpointer(
206 name='imagenet-example', comm=comm)
207 checkpointer.maybe_load(trainer, optimizer)
208 trainer.extend(checkpointer, trigger=checkpoint_interval)
209
210 # Create a multi node evaluator from an evaluator.
211 evaluator = TestModeEvaluator(val_iter, model, device=device)
212 evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
213 trainer.extend(evaluator, trigger=val_interval)
214
215 # Some display and output extensions are necessary only for one worker.
216 # (Otherwise, there would just be repeated outputs.)
217 if comm.rank == 0:
218 trainer.extend(extensions.DumpGraph('main/loss'))
219 trainer.extend(extensions.LogReport(trigger=log_interval))
220 trainer.extend(extensions.observe_lr(), trigger=log_interval)
221 trainer.extend(extensions.PrintReport([
222 'epoch', 'iteration', 'main/loss', 'validation/main/loss',
223 'main/accuracy', 'validation/main/accuracy', 'lr'
224 ]), trigger=log_interval)
225 trainer.extend(extensions.ProgressBar(update_interval=10))
226
227 if args.resume:
228 chainer.serializers.load_npz(args.resume, trainer)
229
230 trainer.run()
231
232
233 if __name__ == '__main__':
234 main()
235
[end of examples/chainermn/imagenet/train_imagenet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/chainermn/imagenet/train_imagenet.py b/examples/chainermn/imagenet/train_imagenet.py
--- a/examples/chainermn/imagenet/train_imagenet.py
+++ b/examples/chainermn/imagenet/train_imagenet.py
@@ -143,7 +143,7 @@
# https://chainermn.readthedocs.io/en/stable/tutorial/tips_faqs.html#using-multiprocessiterator
# This must be done *before* ``chainermn.create_communicator``!!!
multiprocessing.set_start_method('forkserver')
- p = multiprocessing.Process(target=lambda *x: x, args=())
+ p = multiprocessing.Process()
p.start()
p.join()
| {"golden_diff": "diff --git a/examples/chainermn/imagenet/train_imagenet.py b/examples/chainermn/imagenet/train_imagenet.py\n--- a/examples/chainermn/imagenet/train_imagenet.py\n+++ b/examples/chainermn/imagenet/train_imagenet.py\n@@ -143,7 +143,7 @@\n # https://chainermn.readthedocs.io/en/stable/tutorial/tips_faqs.html#using-multiprocessiterator\n # This must be done *before* ``chainermn.create_communicator``!!!\n multiprocessing.set_start_method('forkserver')\n- p = multiprocessing.Process(target=lambda *x: x, args=())\n+ p = multiprocessing.Process()\n p.start()\n p.join()\n", "issue": "ChainerMN's ImageNet example is broken because of update in pickle module\n* Conditions\r\n<!-- If you're using Chainer 4.0+, you can also get this information by typing `python -c 'import chainer; chainer.print_runtime_info()'. -->\r\n - Chainer >= 5.0.0\r\n\r\n* Code to reproduce\r\nThe ImageNet example on Python >= 3.6\r\n\r\n```\r\nimport multiprocessing\r\np = multiprocessing.Process(target=lambda *x: x, args=())\r\np.start()\r\n```\r\n\r\n* Error messages, stack traces, or logs\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/process.py\", line 105, in start\r\n self._popen = self._Popen(self)\r\n File \"/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/context.py\", line 223, in _Popen\r\n return _default_context.get_context().Process._Popen(process_obj)\r\n File \"/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/context.py\", line 291, in _Popen\r\n return Popen(process_obj)\r\n File \"/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/popen_forkserver.py\", line 35, in __init__\r\n super().__init__(process_obj)\r\n File \"/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/popen_fork.py\", line 20, in __init__\r\n self._launch(process_obj)\r\n File \"/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/popen_forkserver.py\", line 47, in _launch\r\n reduction.dump(process_obj, buf)\r\n File \"/home/kfukuda/.pyenv/versions/anaconda3-4.3.1/lib/python3.6/multiprocessing/reduction.py\", line 60, in dump\r\n ForkingPickler(file, protocol).dump(obj)\r\n_pickle.PicklingError: Can't pickle <function <lambda> at 0x7fb633f35e18>: attribute lookup <lambda> on __main__ failed\r\n```\r\nFor details, see https://docs.python.org/3.6/library/pickle.html\r\n\r\n```\r\nThe following types can be pickled:\r\n\r\n* None, True, and False\r\n* integers, floating point numbers, complex numbers\r\n* strings, bytes, bytearrays\r\n* tuples, lists, sets, and dictionaries containing only picklable objects\r\n* functions defined at the top level of a module (using def, not lambda)\r\n* built-in functions defined at the top level of a module\r\n* classes that are defined at the top level of a module\r\n* instances of such classes whose __dict__ or the result of calling __getstate__() is picklable (see section Pickling Class Instances for details).\r\n```\r\n\r\nThus lambda is not pickable.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport argparse\nimport multiprocessing\nimport random\nimport sys\n\nimport numpy as np\n\nimport chainer\nimport chainer.cuda\nfrom chainer import training\nfrom chainer.training import extensions\n\nimport chainermn\n\n\nimport models.alex as alex\nimport models.googlenet as 
googlenet\nimport models.googlenetbn as googlenetbn\nimport models.nin as nin\nimport models.resnet50 as resnet50\n\n# Check Python version if it supports multiprocessing.set_start_method,\n# which was introduced in Python 3.4\nmajor, minor, _, _, _ = sys.version_info\nif major <= 2 or (major == 3 and minor < 4):\n sys.stderr.write('Error: ImageNet example uses '\n 'chainer.iterators.MultiprocessIterator, '\n 'which works only with Python >= 3.4. \\n'\n 'For more details, see '\n 'http://chainermn.readthedocs.io/en/master/'\n 'tutorial/tips_faqs.html#using-multiprocessiterator\\n')\n exit(-1)\n\n\nclass PreprocessedDataset(chainer.dataset.DatasetMixin):\n\n def __init__(self, path, root, mean, crop_size, random=True):\n self.base = chainer.datasets.LabeledImageDataset(path, root)\n self.mean = mean.astype(np.float32)\n self.crop_size = crop_size\n self.random = random\n\n def __len__(self):\n return len(self.base)\n\n def get_example(self, i):\n # It reads the i-th image/label pair and return a preprocessed image.\n # It applies following preprocesses:\n # - Cropping (random or center rectangular)\n # - Random flip\n # - Scaling to [0, 1] value\n crop_size = self.crop_size\n\n image, label = self.base[i]\n _, h, w = image.shape\n\n if self.random:\n # Randomly crop a region and flip the image\n top = random.randint(0, h - crop_size - 1)\n left = random.randint(0, w - crop_size - 1)\n if random.randint(0, 1):\n image = image[:, :, ::-1]\n else:\n # Crop the center\n top = (h - crop_size) // 2\n left = (w - crop_size) // 2\n bottom = top + crop_size\n right = left + crop_size\n\n image = image[:, top:bottom, left:right]\n image -= self.mean[:, top:bottom, left:right]\n image *= (1.0 / 255.0) # Scale to [0, 1]\n return image, label\n\n\n# chainermn.create_multi_node_evaluator can be also used with user customized\n# evaluator classes that inherit chainer.training.extensions.Evaluator.\nclass TestModeEvaluator(extensions.Evaluator):\n\n def evaluate(self):\n model = self.get_target('main')\n model.train = False\n ret = super(TestModeEvaluator, self).evaluate()\n model.train = True\n return ret\n\n\ndef main():\n # Check if GPU is available\n # (ImageNet example does not support CPU execution)\n if not chainer.cuda.available:\n raise RuntimeError('ImageNet requires GPU support.')\n\n archs = {\n 'alex': alex.Alex,\n 'googlenet': googlenet.GoogLeNet,\n 'googlenetbn': googlenetbn.GoogLeNetBN,\n 'nin': nin.NIN,\n 'resnet50': resnet50.ResNet50,\n }\n\n parser = argparse.ArgumentParser(\n description='Learning convnet from ILSVRC2012 dataset')\n parser.add_argument('train', help='Path to training image-label list file')\n parser.add_argument('val', help='Path to validation image-label list file')\n parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin',\n help='Convnet architecture')\n parser.add_argument('--batchsize', '-B', type=int, default=32,\n help='Learning minibatch size')\n parser.add_argument('--epoch', '-E', type=int, default=10,\n help='Number of epochs to train')\n parser.add_argument('--initmodel',\n help='Initialize the model from given file')\n parser.add_argument('--loaderjob', '-j', type=int,\n help='Number of parallel data loading processes')\n parser.add_argument('--mean', '-m', default='mean.npy',\n help='Mean file (computed by compute_mean.py)')\n parser.add_argument('--resume', '-r', default='',\n help='Initialize the trainer from given file')\n parser.add_argument('--out', '-o', default='result',\n help='Output directory')\n parser.add_argument('--root', 
'-R', default='.',\n help='Root directory path of image files')\n parser.add_argument('--val_batchsize', '-b', type=int, default=250,\n help='Validation minibatch size')\n parser.add_argument('--test', action='store_true')\n parser.add_argument('--communicator', default='hierarchical')\n parser.set_defaults(test=False)\n args = parser.parse_args()\n\n # Start method of multiprocessing module need to be changed if we\n # are using InfiniBand and MultiprocessIterator. This is because\n # processes often crash when calling fork if they are using\n # Infiniband. (c.f.,\n # https://www.open-mpi.org/faq/?category=tuning#fork-warning )\n # Also, just setting the start method does not seem to be\n # sufficient to actually launch the forkserver processes, so also\n # start a dummy process.\n # See also our document:\n # https://chainermn.readthedocs.io/en/stable/tutorial/tips_faqs.html#using-multiprocessiterator\n # This must be done *before* ``chainermn.create_communicator``!!!\n multiprocessing.set_start_method('forkserver')\n p = multiprocessing.Process(target=lambda *x: x, args=())\n p.start()\n p.join()\n\n # Prepare ChainerMN communicator.\n comm = chainermn.create_communicator(args.communicator)\n device = comm.intra_rank\n\n if comm.rank == 0:\n print('==========================================')\n print('Num process (COMM_WORLD): {}'.format(comm.size))\n print('Using {} communicator'.format(args.communicator))\n print('Using {} arch'.format(args.arch))\n print('Num Minibatch-size: {}'.format(args.batchsize))\n print('Num epoch: {}'.format(args.epoch))\n print('==========================================')\n\n model = archs[args.arch]()\n if args.initmodel:\n print('Load model from', args.initmodel)\n chainer.serializers.load_npz(args.initmodel, model)\n\n chainer.cuda.get_device_from_id(device).use() # Make the GPU current\n model.to_gpu()\n\n # Split and distribute the dataset. Only worker 0 loads the whole dataset.\n # Datasets of worker 0 are evenly split and distributed to all workers.\n mean = np.load(args.mean)\n if comm.rank == 0:\n train = PreprocessedDataset(args.train, args.root, mean, model.insize)\n val = PreprocessedDataset(\n args.val, args.root, mean, model.insize, False)\n else:\n train = None\n val = None\n train = chainermn.scatter_dataset(train, comm, shuffle=True)\n val = chainermn.scatter_dataset(val, comm)\n\n # A workaround for processes crash should be done before making\n # communicator above, when using fork (e.g. 
MultiProcessIterator)\n # along with Infiniband.\n train_iter = chainer.iterators.MultiprocessIterator(\n train, args.batchsize, n_processes=args.loaderjob)\n val_iter = chainer.iterators.MultiprocessIterator(\n val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)\n\n # Create a multi node optimizer from a standard Chainer optimizer.\n optimizer = chainermn.create_multi_node_optimizer(\n chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9), comm)\n optimizer.setup(model)\n\n # Set up a trainer\n updater = training.StandardUpdater(train_iter, optimizer, device=device)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)\n\n checkpoint_interval = (10, 'iteration') if args.test else (1, 'epoch')\n val_interval = (10, 'iteration') if args.test else (1, 'epoch')\n log_interval = (10, 'iteration') if args.test else (1, 'epoch')\n\n checkpointer = chainermn.create_multi_node_checkpointer(\n name='imagenet-example', comm=comm)\n checkpointer.maybe_load(trainer, optimizer)\n trainer.extend(checkpointer, trigger=checkpoint_interval)\n\n # Create a multi node evaluator from an evaluator.\n evaluator = TestModeEvaluator(val_iter, model, device=device)\n evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)\n trainer.extend(evaluator, trigger=val_interval)\n\n # Some display and output extensions are necessary only for one worker.\n # (Otherwise, there would just be repeated outputs.)\n if comm.rank == 0:\n trainer.extend(extensions.DumpGraph('main/loss'))\n trainer.extend(extensions.LogReport(trigger=log_interval))\n trainer.extend(extensions.observe_lr(), trigger=log_interval)\n trainer.extend(extensions.PrintReport([\n 'epoch', 'iteration', 'main/loss', 'validation/main/loss',\n 'main/accuracy', 'validation/main/accuracy', 'lr'\n ]), trigger=log_interval)\n trainer.extend(extensions.ProgressBar(update_interval=10))\n\n if args.resume:\n chainer.serializers.load_npz(args.resume, trainer)\n\n trainer.run()\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/chainermn/imagenet/train_imagenet.py"}]} | 4,047 | 160 |
gh_patches_debug_35573 | rasdani/github-patches | git_diff | tensorflow__addons-514 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add docstring for Correlation cost layer
1. Hide the [correlation_cost](https://github.com/tensorflow/addons/blob/cffee80779e606870ebf2b7fce241eeb870053f5/tensorflow_addons/layers/optical_flow.py#L30) function and make it private.
2. Add documentation for [CorrelationCost](https://github.com/tensorflow/addons/blob/cffee80779e606870ebf2b7fce241eeb870053f5/tensorflow_addons/layers/optical_flow.py#L143).
For details please check the discussion in https://github.com/tensorflow/addons/pull/207
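A hedged usage sketch to accompany the new docstring (parameter values are illustrative, and the import path assumes the module shown below; building the custom op is still required):
```
import tensorflow as tf
from tensorflow_addons.layers.optical_flow import CorrelationCost

# Two feature maps of identical shape, channels_last layout.
feat_a = tf.random.normal([1, 21, 21, 16])
feat_b = tf.random.normal([1, 21, 21, 16])

layer = CorrelationCost(kernel_size=1,
                        max_displacement=2,
                        stride_1=1,
                        stride_2=2,
                        pad=2,
                        data_format='channels_last')

# Output channels become (2 * max_displacement / stride_2 + 1) ** 2 = 9 here.
cost_volume = layer([feat_a, feat_b])
```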
</issue>
<code>
[start of tensorflow_addons/layers/optical_flow.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ============================================================================
15 """Tensorflow op performing correlation cost operation."""
16
17 from __future__ import absolute_import
18 from __future__ import division
19 from __future__ import print_function
20
21 import tensorflow as tf
22 from tensorflow_addons.utils import keras_utils
23 from tensorflow_addons.utils.resource_loader import get_path_to_datafile
24
25 _correlation_cost_op_so = tf.load_op_library(
26 get_path_to_datafile("custom_ops/layers/_correlation_cost_ops.so"))
27
28
29 @tf.function
30 def correlation_cost(input_a,
31 input_b,
32 kernel_size,
33 max_displacement,
34 stride_1,
35 stride_2,
36 pad,
37 data_format='channels_last',
38 name=None):
39 """Correlation Cost Volume computation.
40
41 "FlowNet: Learning Optical Flow with Convolutional Networks"
42 Philipp Fischer, Alexey Dosovitskiy, Eddy Ilg, Philip Hausser,
43 Caner Hazirbas, Vladimir Golkov, Patrick van der Smagt,
44 Daniel Cremers, Thomas Brox. https://arxiv.org/abs/1504.06852
45
46 Computes a cost volume using correlation for two inputs. For feature
47 maps A, B with spatial dimensions w, h, c it computes
48
49 output(a, b) = sum_{l in [-k,k]**2} < I(a+l), J(b+l) >
50
51 where the patches of size K=2d + 1 are centered in position a resp. b.
52
53 The output shape is [B, C', H', W'], where
54
55 r = max_displacement / stride_2;
56 bd = max_displacement + (kernel_size - 1) / 2
57 C' = (2 * r + 1) ** 2
58 H' = H + 2 * (pad - bd) / stride_1
59 W' = W + 2 * (pad - bd) / stride_1
60
61 Note: When the data_format requests "channels_last", an additional explicit
62 transpose operation is executed.
63
64 Args:
65 input_a: A `Tensor` of the format specified by `data_format`.
66 input_b: A `Tensor` of the format specified by `data_format`.
67 kernel_size: An integer specifying the height and width of the
68 patch used to compute the per-patch costs.
69 max_displacement: An integer specifying the maximum search radius
70 for each position.
71 stride_1: An integer specifying the stride length in the input.
72 stride_2: An integer specifying the stride length in the patch.
73 pad: An integer specifying the paddings in height and width.
74 data_format: Specifies the data format.
75 Possible values are:
76 "channels_last" float [batch, height, width, channels]
77 "channels_first" float [batch, channels, height, width]
78 Defaults to `"channels_last"`.
79 name: A name for the operation (optional).
80
81 Returns:
82 A `Tensor` of the format specified by `data_format`.
83 """
84
85 with tf.name_scope(name or "correlation_cost"):
86 op_call = _correlation_cost_op_so.addons_correlation_cost
87
88 if data_format == "channels_last":
89 op_data_format = "NHWC"
90 elif data_format == "channels_first":
91 op_data_format = "NCHW"
92 else:
93 raise ValueError("`data_format` must be either `channels_last` or"
94 "`channels_first`")
95
96 ret = op_call(
97 input_a,
98 input_b,
99 kernel_size=kernel_size,
100 max_displacement=max_displacement,
101 stride_1=stride_1,
102 stride_2=stride_2,
103 pad=pad,
104 data_format=op_data_format)
105 if data_format == 'channels_last':
106 # this is easier to maintain without
107 # specializing an additional cuda kernel
108 return tf.transpose(ret, [0, 2, 3, 1])
109 return ret
110
111
112 @tf.RegisterGradient("Addons>CorrelationCost")
113 def _correlation_cost_grad(op, grad_output):
114 kernel_size = op.get_attr("kernel_size")
115 max_displacement = op.get_attr("max_displacement")
116 stride_1 = op.get_attr("stride_1")
117 stride_2 = op.get_attr("stride_2")
118 pad = op.get_attr("pad")
119 data_format = op.get_attr("data_format")
120
121 input_a = tf.convert_to_tensor(op.inputs[0], name="input_a")
122 input_b = tf.convert_to_tensor(op.inputs[1], name="input_b")
123 grad_output_tensor = tf.convert_to_tensor(grad_output, name="grad_output")
124
125 op_call = _correlation_cost_op_so.addons_correlation_cost_grad
126 grads = op_call(
127 input_a,
128 input_b,
129 grad_output_tensor,
130 kernel_size=kernel_size,
131 max_displacement=max_displacement,
132 stride_1=stride_1,
133 stride_2=stride_2,
134 pad=pad,
135 data_format=data_format)
136
137 grad_input_a = tf.convert_to_tensor(grads[0], name="grad_input_a")
138 grad_input_b = tf.convert_to_tensor(grads[1], name="grad_input_b")
139 return [grad_input_a, grad_input_b]
140
141
142 @keras_utils.register_keras_custom_object
143 class CorrelationCost(tf.keras.layers.Layer):
144 def __init__(self, kernel_size, max_displacement, stride_1, stride_2, pad,
145 data_format, **kwargs):
146 self.kernel_size = kernel_size
147 self.max_displacement = max_displacement
148 self.stride_1 = stride_1
149 self.stride_2 = stride_2
150 self.pad = pad
151
152 if data_format != "channels_last" and data_format != "channels_first":
153 raise ValueError("`data_format` must be either `channels_last` or"
154 "`channels_first`, instead got %s" % data_format)
155
156 self.data_format = data_format
157
158 super(CorrelationCost, self).__init__(**kwargs)
159
160 def build(self, input_shape):
161 if not isinstance(input_shape, list):
162 raise ValueError("Input must be a list of two Tensors to process")
163 super(CorrelationCost, self).build(input_shape)
164
165 def call(self, inputs):
166 if not isinstance(inputs, list):
167 raise ValueError("Input must be a list of two Tensors to process")
168
169 input_a = tf.convert_to_tensor(inputs[0])
170 input_b = tf.convert_to_tensor(inputs[1])
171
172 return correlation_cost(
173 input_a,
174 input_b,
175 kernel_size=self.kernel_size,
176 max_displacement=self.max_displacement,
177 stride_1=self.stride_1,
178 stride_2=self.stride_2,
179 pad=self.pad,
180 data_format=self.data_format)
181
182 def compute_output_shape(self, input_shape):
183 assert isinstance(input_shape, list)
184 n = input_shape[0][0]
185 r = self.max_displacement / self.stride_2
186 bd = self.max_displacement + (self.kernel_size - 1) / 2
187 output_c = (2 * r + 1)**2
188
189 if self.data_format == "channels_first":
190 output_h = input_shape[0][1] + 2 * (self.pad - bd) / self.stride_1
191 output_w = input_shape[0][2] + 2 * (self.pad - bd) / self.stride_1
192 return [n, output_c, output_h, output_w]
193
194 elif self.data_format == "channels_last":
195 output_h = input_shape[0][0] + 2 * (self.pad - bd) / self.stride_1
196 output_w = input_shape[0][1] + 2 * (self.pad - bd) / self.stride_1
197 return [n, output_h, output_w, output_c]
198 else:
199 raise ValueError("`data_format` must be either `channels_last` or"
200 "`channels_first`")
201
202 def get_config(self):
203 config = {
204 'kernel_size': self.kernel_size,
205 'max_displacement': self.max_displacement,
206 'stride_1': self.stride_1,
207 'stride_2': self.stride_2,
208 'pad': self.pad,
209 'data_format': self.data_format
210 }
211
212 base_config = super(CorrelationCost, self).get_config()
213 return dict(list(base_config.items()) + list(config.items()))
214
[end of tensorflow_addons/layers/optical_flow.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tensorflow_addons/layers/optical_flow.py b/tensorflow_addons/layers/optical_flow.py
--- a/tensorflow_addons/layers/optical_flow.py
+++ b/tensorflow_addons/layers/optical_flow.py
@@ -27,15 +27,15 @@
@tf.function
-def correlation_cost(input_a,
- input_b,
- kernel_size,
- max_displacement,
- stride_1,
- stride_2,
- pad,
- data_format='channels_last',
- name=None):
+def _correlation_cost(input_a,
+ input_b,
+ kernel_size,
+ max_displacement,
+ stride_1,
+ stride_2,
+ pad,
+ data_format='channels_last',
+ name=None):
"""Correlation Cost Volume computation.
"FlowNet: Learning Optical Flow with Convolutional Networks"
@@ -141,6 +141,27 @@
@keras_utils.register_keras_custom_object
class CorrelationCost(tf.keras.layers.Layer):
+ """Correlation Cost Layer.
+
+ This layer implements the correlation operation from FlowNet Learning
+ Optical Flow with Convolutional Networks (Fischer et al.):
+ https://arxiv.org/abs/1504.06
+
+ Args:
+ kernel_size: An integer specifying the height and width of the
+ patch used to compute the per-patch costs.
+ max_displacement: An integer specifying the maximum search radius
+ for each position.
+ stride_1: An integer specifying the stride length in the input.
+ stride_2: An integer specifying the stride length in the patch.
+ pad: An integer specifying the paddings in height and width.
+ data_format: Specifies the data format.
+ Possible values are:
+ "channels_last" float [batch, height, width, channels]
+ "channels_first" float [batch, channels, height, width]
+ Defaults to `"channels_last"`.
+ """
+
def __init__(self, kernel_size, max_displacement, stride_1, stride_2, pad,
data_format, **kwargs):
self.kernel_size = kernel_size
@@ -169,7 +190,7 @@
input_a = tf.convert_to_tensor(inputs[0])
input_b = tf.convert_to_tensor(inputs[1])
- return correlation_cost(
+ return _correlation_cost(
input_a,
input_b,
kernel_size=self.kernel_size,
| {"golden_diff": "diff --git a/tensorflow_addons/layers/optical_flow.py b/tensorflow_addons/layers/optical_flow.py\n--- a/tensorflow_addons/layers/optical_flow.py\n+++ b/tensorflow_addons/layers/optical_flow.py\n@@ -27,15 +27,15 @@\n \n \n @tf.function\n-def correlation_cost(input_a,\n- input_b,\n- kernel_size,\n- max_displacement,\n- stride_1,\n- stride_2,\n- pad,\n- data_format='channels_last',\n- name=None):\n+def _correlation_cost(input_a,\n+ input_b,\n+ kernel_size,\n+ max_displacement,\n+ stride_1,\n+ stride_2,\n+ pad,\n+ data_format='channels_last',\n+ name=None):\n \"\"\"Correlation Cost Volume computation.\n \n \"FlowNet: Learning Optical Flow with Convolutional Networks\"\n@@ -141,6 +141,27 @@\n \n @keras_utils.register_keras_custom_object\n class CorrelationCost(tf.keras.layers.Layer):\n+ \"\"\"Correlation Cost Layer.\n+\n+ This layer implements the correlation operation from FlowNet Learning\n+ Optical Flow with Convolutional Networks (Fischer et al.):\n+ https://arxiv.org/abs/1504.06\n+\n+ Args:\n+ kernel_size: An integer specifying the height and width of the\n+ patch used to compute the per-patch costs.\n+ max_displacement: An integer specifying the maximum search radius\n+ for each position.\n+ stride_1: An integer specifying the stride length in the input.\n+ stride_2: An integer specifying the stride length in the patch.\n+ pad: An integer specifying the paddings in height and width.\n+ data_format: Specifies the data format.\n+ Possible values are:\n+ \"channels_last\" float [batch, height, width, channels]\n+ \"channels_first\" float [batch, channels, height, width]\n+ Defaults to `\"channels_last\"`.\n+ \"\"\"\n+\n def __init__(self, kernel_size, max_displacement, stride_1, stride_2, pad,\n data_format, **kwargs):\n self.kernel_size = kernel_size\n@@ -169,7 +190,7 @@\n input_a = tf.convert_to_tensor(inputs[0])\n input_b = tf.convert_to_tensor(inputs[1])\n \n- return correlation_cost(\n+ return _correlation_cost(\n input_a,\n input_b,\n kernel_size=self.kernel_size,\n", "issue": "Add docstring for Correlation cost layer\n1. Hide [correlation_cost](https://github.com/tensorflow/addons/blob/cffee80779e606870ebf2b7fce241eeb870053f5/tensorflow_addons/layers/optical_flow.py#L30) function, and make it private.\r\n2. Add document for [CorrelationCost](https://github.com/tensorflow/addons/blob/cffee80779e606870ebf2b7fce241eeb870053f5/tensorflow_addons/layers/optical_flow.py#L143)\r\n\r\nFor details please check the discussion in https://github.com/tensorflow/addons/pull/207\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tensorflow op performing correlation cost operation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_addons.utils import keras_utils\nfrom tensorflow_addons.utils.resource_loader import get_path_to_datafile\n\n_correlation_cost_op_so = tf.load_op_library(\n get_path_to_datafile(\"custom_ops/layers/_correlation_cost_ops.so\"))\n\n\[email protected]\ndef correlation_cost(input_a,\n input_b,\n kernel_size,\n max_displacement,\n stride_1,\n stride_2,\n pad,\n data_format='channels_last',\n name=None):\n \"\"\"Correlation Cost Volume computation.\n\n \"FlowNet: Learning Optical Flow with Convolutional Networks\"\n Philipp Fischer, Alexey Dosovitskiy, Eddy Ilg, Philip Hausser,\n Caner Hazirbas, Vladimir Golkov, Patrick van der Smagt,\n Daniel Cremers, Thomas Brox. https://arxiv.org/abs/1504.06852\n\n Computes a cost volume using correlation for two inputs. For feature\n maps A, B with spatial dimensions w, h, c it computes\n\n output(a, b) = sum_{l in [-k,k]**2} < I(a+l), J(b+l) >\n\n where the patches of size K=2d + 1 are centered in position a resp. 
b.\n\n The output shape is [B, C', H', W'], where\n\n r = max_displacement / stride_2;\n bd = max_displacement + (kernel_size - 1) / 2\n C' = (2 * r + 1) ** 2\n H' = H + 2 * (pad - bd) / stride_1\n W' = W + 2 * (pad - bd) / stride_1\n\n Note: When the data_format requests \"channels_last\", an additional explicit\n transpose operation is executed.\n\n Args:\n input_a: A `Tensor` of the format specified by `data_format`.\n input_b: A `Tensor` of the format specified by `data_format`.\n kernel_size: An integer specifying the height and width of the\n patch used to compute the per-patch costs.\n max_displacement: An integer specifying the maximum search radius\n for each position.\n stride_1: An integer specifying the stride length in the input.\n stride_2: An integer specifying the stride length in the patch.\n pad: An integer specifying the paddings in height and width.\n data_format: Specifies the data format.\n Possible values are:\n \"channels_last\" float [batch, height, width, channels]\n \"channels_first\" float [batch, channels, height, width]\n Defaults to `\"channels_last\"`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of the format specified by `data_format`.\n \"\"\"\n\n with tf.name_scope(name or \"correlation_cost\"):\n op_call = _correlation_cost_op_so.addons_correlation_cost\n\n if data_format == \"channels_last\":\n op_data_format = \"NHWC\"\n elif data_format == \"channels_first\":\n op_data_format = \"NCHW\"\n else:\n raise ValueError(\"`data_format` must be either `channels_last` or\"\n \"`channels_first`\")\n\n ret = op_call(\n input_a,\n input_b,\n kernel_size=kernel_size,\n max_displacement=max_displacement,\n stride_1=stride_1,\n stride_2=stride_2,\n pad=pad,\n data_format=op_data_format)\n if data_format == 'channels_last':\n # this is easier to maintain without\n # specializing an additional cuda kernel\n return tf.transpose(ret, [0, 2, 3, 1])\n return ret\n\n\[email protected](\"Addons>CorrelationCost\")\ndef _correlation_cost_grad(op, grad_output):\n kernel_size = op.get_attr(\"kernel_size\")\n max_displacement = op.get_attr(\"max_displacement\")\n stride_1 = op.get_attr(\"stride_1\")\n stride_2 = op.get_attr(\"stride_2\")\n pad = op.get_attr(\"pad\")\n data_format = op.get_attr(\"data_format\")\n\n input_a = tf.convert_to_tensor(op.inputs[0], name=\"input_a\")\n input_b = tf.convert_to_tensor(op.inputs[1], name=\"input_b\")\n grad_output_tensor = tf.convert_to_tensor(grad_output, name=\"grad_output\")\n\n op_call = _correlation_cost_op_so.addons_correlation_cost_grad\n grads = op_call(\n input_a,\n input_b,\n grad_output_tensor,\n kernel_size=kernel_size,\n max_displacement=max_displacement,\n stride_1=stride_1,\n stride_2=stride_2,\n pad=pad,\n data_format=data_format)\n\n grad_input_a = tf.convert_to_tensor(grads[0], name=\"grad_input_a\")\n grad_input_b = tf.convert_to_tensor(grads[1], name=\"grad_input_b\")\n return [grad_input_a, grad_input_b]\n\n\n@keras_utils.register_keras_custom_object\nclass CorrelationCost(tf.keras.layers.Layer):\n def __init__(self, kernel_size, max_displacement, stride_1, stride_2, pad,\n data_format, **kwargs):\n self.kernel_size = kernel_size\n self.max_displacement = max_displacement\n self.stride_1 = stride_1\n self.stride_2 = stride_2\n self.pad = pad\n\n if data_format != \"channels_last\" and data_format != \"channels_first\":\n raise ValueError(\"`data_format` must be either `channels_last` or\"\n \"`channels_first`, instead got %s\" % data_format)\n\n self.data_format = data_format\n\n 
super(CorrelationCost, self).__init__(**kwargs)\n\n def build(self, input_shape):\n if not isinstance(input_shape, list):\n raise ValueError(\"Input must be a list of two Tensors to process\")\n super(CorrelationCost, self).build(input_shape)\n\n def call(self, inputs):\n if not isinstance(inputs, list):\n raise ValueError(\"Input must be a list of two Tensors to process\")\n\n input_a = tf.convert_to_tensor(inputs[0])\n input_b = tf.convert_to_tensor(inputs[1])\n\n return correlation_cost(\n input_a,\n input_b,\n kernel_size=self.kernel_size,\n max_displacement=self.max_displacement,\n stride_1=self.stride_1,\n stride_2=self.stride_2,\n pad=self.pad,\n data_format=self.data_format)\n\n def compute_output_shape(self, input_shape):\n assert isinstance(input_shape, list)\n n = input_shape[0][0]\n r = self.max_displacement / self.stride_2\n bd = self.max_displacement + (self.kernel_size - 1) / 2\n output_c = (2 * r + 1)**2\n\n if self.data_format == \"channels_first\":\n output_h = input_shape[0][1] + 2 * (self.pad - bd) / self.stride_1\n output_w = input_shape[0][2] + 2 * (self.pad - bd) / self.stride_1\n return [n, output_c, output_h, output_w]\n\n elif self.data_format == \"channels_last\":\n output_h = input_shape[0][0] + 2 * (self.pad - bd) / self.stride_1\n output_w = input_shape[0][1] + 2 * (self.pad - bd) / self.stride_1\n return [n, output_h, output_w, output_c]\n else:\n raise ValueError(\"`data_format` must be either `channels_last` or\"\n \"`channels_first`\")\n\n def get_config(self):\n config = {\n 'kernel_size': self.kernel_size,\n 'max_displacement': self.max_displacement,\n 'stride_1': self.stride_1,\n 'stride_2': self.stride_2,\n 'pad': self.pad,\n 'data_format': self.data_format\n }\n\n base_config = super(CorrelationCost, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "tensorflow_addons/layers/optical_flow.py"}]} | 3,212 | 561 |
gh_patches_debug_17989 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2336 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEATURE]: Make CUDA extensions default to optional
### Describe the feature
When installing `Colossal-AI` with `pip install .` from source or `pip install colossalai` from PyPI, we always build CUDA extensions by default. However, this causes problems for users who do not have a working CUDA and torch environment; as a result, they fail at the very first step, installation. I would suggest not building extensions by default. Users could build extensions in two ways:
1. export `CUDA_EXT=1` when running `pip install`
2. build CUDA kernels only when the kernel is used
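A hedged sketch of option 1 as it might look in `setup.py` (the flag name follows the existing `build_cuda_ext`; exact error handling is an assumption), e.g. invoked with `CUDA_EXT=1 pip install .`:
```
import os

# Opt-in instead of opt-out: build custom CUDA kernels only when the user
# explicitly exports CUDA_EXT=1 before running pip install.
build_cuda_ext = False
if int(os.environ.get('CUDA_EXT', '0')) == 1:
    build_cuda_ext = True
```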
</issue>
<code>
[start of setup.py]
1 import os
2 import re
3
4 from setuptools import find_packages, setup
5
6 from op_builder.utils import get_cuda_bare_metal_version
7
8 try:
9 import torch
10 from torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension
11 print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
12 TORCH_MAJOR = int(torch.__version__.split('.')[0])
13 TORCH_MINOR = int(torch.__version__.split('.')[1])
14
15 if TORCH_MAJOR < 1 or (TORCH_MAJOR == 1 and TORCH_MINOR < 10):
16 raise RuntimeError("Colossal-AI requires Pytorch 1.10 or newer.\n"
17 "The latest stable release can be obtained from https://pytorch.org/")
18 TORCH_AVAILABLE = True
19 except ImportError:
20 TORCH_AVAILABLE = False
21
22
23 # ninja build does not work unless include_dirs are abs path
24 this_dir = os.path.dirname(os.path.abspath(__file__))
25 build_cuda_ext = True
26 ext_modules = []
27
28 if int(os.environ.get('NO_CUDA_EXT', '0')) == 1 or not TORCH_AVAILABLE:
29 build_cuda_ext = False
30
31
32 def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
33 raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
34 torch_binary_major = torch.version.cuda.split(".")[0]
35 torch_binary_minor = torch.version.cuda.split(".")[1]
36
37 print("\nCompiling cuda extensions with")
38 print(raw_output + "from " + cuda_dir + "/bin\n")
39
40 if bare_metal_major != torch_binary_major:
41 print(f'The detected CUDA version ({raw_output}) mismatches the version that was used to compile PyTorch '
42 f'({torch.version.cuda}). CUDA extension will not be installed.')
43 return False
44
45 if bare_metal_minor != torch_binary_minor:
46 print("\nWarning: Cuda extensions are being compiled with a version of Cuda that does "
47 "not match the version used to compile Pytorch binaries. "
48 f"Pytorch binaries were compiled with Cuda {torch.version.cuda}.\n"
49 "In some cases, a minor-version mismatch will not cause later errors: "
50 "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. ")
51 return True
52
53
54 def check_cuda_availability(cuda_dir):
55 if not torch.cuda.is_available():
56 # https://github.com/NVIDIA/apex/issues/486
57 # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query
58 # torch.cuda.get_device_capability(), which will fail if you are compiling in an environment
59 # without visible GPUs (e.g. during an nvidia-docker build command).
60 print(
61 '\nWarning: Torch did not find available GPUs on this system.\n',
62 'If your intention is to cross-compile, this is not an error.\n'
63 'By default, Colossal-AI will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n'
64 'Volta (compute capability 7.0), Turing (compute capability 7.5),\n'
65 'and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n'
66 'If you wish to cross-compile for a single specific architecture,\n'
67 'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n')
68 if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
69 _, bare_metal_major, _ = get_cuda_bare_metal_version(cuda_dir)
70 if int(bare_metal_major) == 11:
71 os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
72 else:
73 os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
74 return False
75
76 if cuda_dir is None:
77 print("nvcc was not found. CUDA extension will not be installed. If you're installing within a container from "
78 "https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
79 return False
80 return True
81
82
83 def append_nvcc_threads(nvcc_extra_args):
84 _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
85 if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
86 return nvcc_extra_args + ["--threads", "4"]
87 return nvcc_extra_args
88
89
90 def fetch_requirements(path):
91 with open(path, 'r') as fd:
92 return [r.strip() for r in fd.readlines()]
93
94
95 def fetch_readme():
96 with open('README.md', encoding='utf-8') as f:
97 return f.read()
98
99
100 def get_version():
101 setup_file_path = os.path.abspath(__file__)
102 project_path = os.path.dirname(setup_file_path)
103 version_txt_path = os.path.join(project_path, 'version.txt')
104 version_py_path = os.path.join(project_path, 'colossalai/version.py')
105
106 with open(version_txt_path) as f:
107 version = f.read().strip()
108 if build_cuda_ext:
109 torch_version = '.'.join(torch.__version__.split('.')[:2])
110 cuda_version = '.'.join(get_cuda_bare_metal_version(CUDA_HOME)[1:])
111 version += f'+torch{torch_version}cu{cuda_version}'
112
113 # write version into version.py
114 with open(version_py_path, 'w') as f:
115 f.write(f"__version__ = '{version}'\n")
116
117 return version
118
119
120 if build_cuda_ext:
121 build_cuda_ext = check_cuda_availability(CUDA_HOME) and check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
122
123 if build_cuda_ext:
124 # Set up macros for forward/backward compatibility hack around
125 # https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
126 # and
127 # https://github.com/NVIDIA/apex/issues/456
128 # https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
129 version_dependent_macros = ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']
130
131 def cuda_ext_helper(name, sources, extra_cuda_flags, extra_cxx_flags=[]):
132 return CUDAExtension(
133 name=name,
134 sources=[os.path.join('colossalai/kernel/cuda_native/csrc', path) for path in sources],
135 include_dirs=[os.path.join(this_dir, 'colossalai/kernel/cuda_native/csrc/kernels/include')],
136 extra_compile_args={
137 'cxx': ['-O3'] + version_dependent_macros + extra_cxx_flags,
138 'nvcc': append_nvcc_threads(['-O3', '--use_fast_math'] + version_dependent_macros + extra_cuda_flags)
139 })
140
141 #### fused optim kernels ###
142 from op_builder import FusedOptimBuilder
143 ext_modules.append(FusedOptimBuilder().builder('colossalai._C.fused_optim'))
144
145 #### N-D parallel kernels ###
146 cc_flag = []
147 for arch in torch.cuda.get_arch_list():
148 res = re.search(r'sm_(\d+)', arch)
149 if res:
150 arch_cap = res[1]
151 if int(arch_cap) >= 60:
152 cc_flag.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])
153
154 extra_cuda_flags = [
155 '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '--expt-relaxed-constexpr',
156 '--expt-extended-lambda'
157 ]
158
159 from op_builder import ScaledSoftmaxBuilder
160 ext_modules.append(ScaledSoftmaxBuilder().builder('colossalai._C.scaled_upper_triang_masked_softmax'))
161
162 ext_modules.append(
163 cuda_ext_helper('colossalai._C.scaled_masked_softmax',
164 ['scaled_masked_softmax.cpp', 'scaled_masked_softmax_cuda.cu'], extra_cuda_flags + cc_flag))
165
166 from op_builder import MOEBuilder
167 ext_modules.append(MOEBuilder().builder('colossalai._C.moe'))
168
169 extra_cuda_flags = ['-maxrregcount=50']
170
171 ext_modules.append(
172 cuda_ext_helper('colossalai._C.layer_norm', ['layer_norm_cuda.cpp', 'layer_norm_cuda_kernel.cu'],
173 extra_cuda_flags + cc_flag))
174
175 ### MultiHeadAttn Kernel ####
176 from op_builder import MultiHeadAttnBuilder
177 ext_modules.append(MultiHeadAttnBuilder().builder('colossalai._C.multihead_attention'))
178
179 ### Gemini Adam kernel ####
180 from op_builder import CPUAdamBuilder
181 ext_modules.append(CPUAdamBuilder().builder('colossalai._C.cpu_optim'))
182
183 setup(name='colossalai',
184 version=get_version(),
185 packages=find_packages(exclude=(
186 'benchmark',
187 'docker',
188 'tests',
189 'docs',
190 'examples',
191 'tests',
192 'scripts',
193 'requirements',
194 '*.egg-info',
195 )),
196 description='An integrated large-scale model training system with efficient parallelization techniques',
197 long_description=fetch_readme(),
198 long_description_content_type='text/markdown',
199 license='Apache Software License 2.0',
200 url='https://www.colossalai.org',
201 project_urls={
202 'Forum': 'https://github.com/hpcaitech/ColossalAI/discussions',
203 'Bug Tracker': 'https://github.com/hpcaitech/ColossalAI/issues',
204 'Examples': 'https://github.com/hpcaitech/ColossalAI-Examples',
205 'Documentation': 'http://colossalai.readthedocs.io',
206 'Github': 'https://github.com/hpcaitech/ColossalAI',
207 },
208 ext_modules=ext_modules,
209 cmdclass={'build_ext': BuildExtension} if ext_modules else {},
210 install_requires=fetch_requirements('requirements/requirements.txt'),
211 entry_points='''
212 [console_scripts]
213 colossalai=colossalai.cli:cli
214 ''',
215 python_requires='>=3.6',
216 classifiers=[
217 'Programming Language :: Python :: 3',
218 'License :: OSI Approved :: Apache Software License',
219 'Environment :: GPU :: NVIDIA CUDA',
220 'Topic :: Scientific/Engineering :: Artificial Intelligence',
221 'Topic :: System :: Distributed Computing',
222 ],
223 package_data={'colossalai': ['_C/*.pyi']})
224
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,15 +18,22 @@
TORCH_AVAILABLE = True
except ImportError:
TORCH_AVAILABLE = False
+ CUDA_HOME = None
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
-build_cuda_ext = True
+build_cuda_ext = False
ext_modules = []
-if int(os.environ.get('NO_CUDA_EXT', '0')) == 1 or not TORCH_AVAILABLE:
- build_cuda_ext = False
+if int(os.environ.get('CUDA_EXT', '0')) == 1:
+ if not TORCH_AVAILABLE:
+ raise ModuleNotFoundError("PyTorch is not found while CUDA_EXT=1. You need to install PyTorch first in order to build CUDA extensions")
+
+ if not CUDA_HOME:
+ raise RuntimeError("CUDA_HOME is not found while CUDA_EXT=1. You need to export CUDA_HOME environment vairable or install CUDA Toolkit first in order to build CUDA extensions")
+
+ build_cuda_ext = True
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,15 +18,22 @@\n TORCH_AVAILABLE = True\n except ImportError:\n TORCH_AVAILABLE = False\n+ CUDA_HOME = None\n \n \n # ninja build does not work unless include_dirs are abs path\n this_dir = os.path.dirname(os.path.abspath(__file__))\n-build_cuda_ext = True\n+build_cuda_ext = False\n ext_modules = []\n \n-if int(os.environ.get('NO_CUDA_EXT', '0')) == 1 or not TORCH_AVAILABLE:\n- build_cuda_ext = False\n+if int(os.environ.get('CUDA_EXT', '0')) == 1:\n+ if not TORCH_AVAILABLE:\n+ raise ModuleNotFoundError(\"PyTorch is not found while CUDA_EXT=1. You need to install PyTorch first in order to build CUDA extensions\")\n+\n+ if not CUDA_HOME:\n+ raise RuntimeError(\"CUDA_HOME is not found while CUDA_EXT=1. You need to export CUDA_HOME environment vairable or install CUDA Toolkit first in order to build CUDA extensions\")\n+\n+ build_cuda_ext = True\n \n \n def check_cuda_torch_binary_vs_bare_metal(cuda_dir):\n", "issue": "[FEATURE]: Make CUDA extensions default to optional\n### Describe the feature\n\nWhen installing `Colossal-AI` with `pip install .` from source or `pip install colossalai` from pypi, we always build CUDA extensions by default. However, this brings many problems for users who do not have a well-built environment with CUDA and torch. As a result, they failed at the first step, which is installation. I would suggest not to build extensions by default. The users can build extensions in two ways:\r\n1. export `CUDA_EXT=1` when running `pip install`\r\n2. build CUDA kernels only when the kernel is used\n", "before_files": [{"content": "import os\nimport re\n\nfrom setuptools import find_packages, setup\n\nfrom op_builder.utils import get_cuda_bare_metal_version\n\ntry:\n import torch\n from torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension\n print(\"\\n\\ntorch.__version__ = {}\\n\\n\".format(torch.__version__))\n TORCH_MAJOR = int(torch.__version__.split('.')[0])\n TORCH_MINOR = int(torch.__version__.split('.')[1])\n\n if TORCH_MAJOR < 1 or (TORCH_MAJOR == 1 and TORCH_MINOR < 10):\n raise RuntimeError(\"Colossal-AI requires Pytorch 1.10 or newer.\\n\"\n \"The latest stable release can be obtained from https://pytorch.org/\")\n TORCH_AVAILABLE = True\nexcept ImportError:\n TORCH_AVAILABLE = False\n\n\n# ninja build does not work unless include_dirs are abs path\nthis_dir = os.path.dirname(os.path.abspath(__file__))\nbuild_cuda_ext = True\next_modules = []\n\nif int(os.environ.get('NO_CUDA_EXT', '0')) == 1 or not TORCH_AVAILABLE:\n build_cuda_ext = False\n\n\ndef check_cuda_torch_binary_vs_bare_metal(cuda_dir):\n raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)\n torch_binary_major = torch.version.cuda.split(\".\")[0]\n torch_binary_minor = torch.version.cuda.split(\".\")[1]\n\n print(\"\\nCompiling cuda extensions with\")\n print(raw_output + \"from \" + cuda_dir + \"/bin\\n\")\n\n if bare_metal_major != torch_binary_major:\n print(f'The detected CUDA version ({raw_output}) mismatches the version that was used to compile PyTorch '\n f'({torch.version.cuda}). CUDA extension will not be installed.')\n return False\n\n if bare_metal_minor != torch_binary_minor:\n print(\"\\nWarning: Cuda extensions are being compiled with a version of Cuda that does \"\n \"not match the version used to compile Pytorch binaries. 
\"\n f\"Pytorch binaries were compiled with Cuda {torch.version.cuda}.\\n\"\n \"In some cases, a minor-version mismatch will not cause later errors: \"\n \"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. \")\n return True\n\n\ndef check_cuda_availability(cuda_dir):\n if not torch.cuda.is_available():\n # https://github.com/NVIDIA/apex/issues/486\n # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query\n # torch.cuda.get_device_capability(), which will fail if you are compiling in an environment\n # without visible GPUs (e.g. during an nvidia-docker build command).\n print(\n '\\nWarning: Torch did not find available GPUs on this system.\\n',\n 'If your intention is to cross-compile, this is not an error.\\n'\n 'By default, Colossal-AI will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\\n'\n 'Volta (compute capability 7.0), Turing (compute capability 7.5),\\n'\n 'and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\\n'\n 'If you wish to cross-compile for a single specific architecture,\\n'\n 'export TORCH_CUDA_ARCH_LIST=\"compute capability\" before running setup.py.\\n')\n if os.environ.get(\"TORCH_CUDA_ARCH_LIST\", None) is None:\n _, bare_metal_major, _ = get_cuda_bare_metal_version(cuda_dir)\n if int(bare_metal_major) == 11:\n os.environ[\"TORCH_CUDA_ARCH_LIST\"] = \"6.0;6.1;6.2;7.0;7.5;8.0\"\n else:\n os.environ[\"TORCH_CUDA_ARCH_LIST\"] = \"6.0;6.1;6.2;7.0;7.5\"\n return False\n\n if cuda_dir is None:\n print(\"nvcc was not found. CUDA extension will not be installed. If you're installing within a container from \"\n \"https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.\")\n return False\n return True\n\n\ndef append_nvcc_threads(nvcc_extra_args):\n _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)\n if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:\n return nvcc_extra_args + [\"--threads\", \"4\"]\n return nvcc_extra_args\n\n\ndef fetch_requirements(path):\n with open(path, 'r') as fd:\n return [r.strip() for r in fd.readlines()]\n\n\ndef fetch_readme():\n with open('README.md', encoding='utf-8') as f:\n return f.read()\n\n\ndef get_version():\n setup_file_path = os.path.abspath(__file__)\n project_path = os.path.dirname(setup_file_path)\n version_txt_path = os.path.join(project_path, 'version.txt')\n version_py_path = os.path.join(project_path, 'colossalai/version.py')\n\n with open(version_txt_path) as f:\n version = f.read().strip()\n if build_cuda_ext:\n torch_version = '.'.join(torch.__version__.split('.')[:2])\n cuda_version = '.'.join(get_cuda_bare_metal_version(CUDA_HOME)[1:])\n version += f'+torch{torch_version}cu{cuda_version}'\n\n # write version into version.py\n with open(version_py_path, 'w') as f:\n f.write(f\"__version__ = '{version}'\\n\")\n\n return version\n\n\nif build_cuda_ext:\n build_cuda_ext = check_cuda_availability(CUDA_HOME) and check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)\n\nif build_cuda_ext:\n # Set up macros for forward/backward compatibility hack around\n # https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e\n # and\n # https://github.com/NVIDIA/apex/issues/456\n # https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac\n version_dependent_macros = ['-DVERSION_GE_1_1', '-DVERSION_GE_1_3', '-DVERSION_GE_1_5']\n\n def cuda_ext_helper(name, sources, extra_cuda_flags, 
extra_cxx_flags=[]):\n return CUDAExtension(\n name=name,\n sources=[os.path.join('colossalai/kernel/cuda_native/csrc', path) for path in sources],\n include_dirs=[os.path.join(this_dir, 'colossalai/kernel/cuda_native/csrc/kernels/include')],\n extra_compile_args={\n 'cxx': ['-O3'] + version_dependent_macros + extra_cxx_flags,\n 'nvcc': append_nvcc_threads(['-O3', '--use_fast_math'] + version_dependent_macros + extra_cuda_flags)\n })\n\n #### fused optim kernels ###\n from op_builder import FusedOptimBuilder\n ext_modules.append(FusedOptimBuilder().builder('colossalai._C.fused_optim'))\n\n #### N-D parallel kernels ###\n cc_flag = []\n for arch in torch.cuda.get_arch_list():\n res = re.search(r'sm_(\\d+)', arch)\n if res:\n arch_cap = res[1]\n if int(arch_cap) >= 60:\n cc_flag.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])\n\n extra_cuda_flags = [\n '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '--expt-relaxed-constexpr',\n '--expt-extended-lambda'\n ]\n\n from op_builder import ScaledSoftmaxBuilder\n ext_modules.append(ScaledSoftmaxBuilder().builder('colossalai._C.scaled_upper_triang_masked_softmax'))\n\n ext_modules.append(\n cuda_ext_helper('colossalai._C.scaled_masked_softmax',\n ['scaled_masked_softmax.cpp', 'scaled_masked_softmax_cuda.cu'], extra_cuda_flags + cc_flag))\n\n from op_builder import MOEBuilder\n ext_modules.append(MOEBuilder().builder('colossalai._C.moe'))\n\n extra_cuda_flags = ['-maxrregcount=50']\n\n ext_modules.append(\n cuda_ext_helper('colossalai._C.layer_norm', ['layer_norm_cuda.cpp', 'layer_norm_cuda_kernel.cu'],\n extra_cuda_flags + cc_flag))\n\n ### MultiHeadAttn Kernel ####\n from op_builder import MultiHeadAttnBuilder\n ext_modules.append(MultiHeadAttnBuilder().builder('colossalai._C.multihead_attention'))\n\n ### Gemini Adam kernel ####\n from op_builder import CPUAdamBuilder\n ext_modules.append(CPUAdamBuilder().builder('colossalai._C.cpu_optim'))\n\nsetup(name='colossalai',\n version=get_version(),\n packages=find_packages(exclude=(\n 'benchmark',\n 'docker',\n 'tests',\n 'docs',\n 'examples',\n 'tests',\n 'scripts',\n 'requirements',\n '*.egg-info',\n )),\n description='An integrated large-scale model training system with efficient parallelization techniques',\n long_description=fetch_readme(),\n long_description_content_type='text/markdown',\n license='Apache Software License 2.0',\n url='https://www.colossalai.org',\n project_urls={\n 'Forum': 'https://github.com/hpcaitech/ColossalAI/discussions',\n 'Bug Tracker': 'https://github.com/hpcaitech/ColossalAI/issues',\n 'Examples': 'https://github.com/hpcaitech/ColossalAI-Examples',\n 'Documentation': 'http://colossalai.readthedocs.io',\n 'Github': 'https://github.com/hpcaitech/ColossalAI',\n },\n ext_modules=ext_modules,\n cmdclass={'build_ext': BuildExtension} if ext_modules else {},\n install_requires=fetch_requirements('requirements/requirements.txt'),\n entry_points='''\n [console_scripts]\n colossalai=colossalai.cli:cli\n ''',\n python_requires='>=3.6',\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: Apache Software License',\n 'Environment :: GPU :: NVIDIA CUDA',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: System :: Distributed Computing',\n ],\n package_data={'colossalai': ['_C/*.pyi']})\n", "path": "setup.py"}]} | 3,657 | 260 |
gh_patches_debug_192 | rasdani/github-patches | git_diff | liberapay__liberapay.com-726 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The list of top individuals is incomplete
While looking at https://liberapay.com/explore/individuals I realized that ploum isn't listed. It's because he doesn't have a profile statement. The thinking was that without a statement there isn't much to see on a profile page, so there's little point in linking to it. However it also makes the list incomplete.
The list of top individuals is incomplete
While looking at https://liberapay.com/explore/individuals I realized that ploum isn't listed. It's because he doesn't have a profile statement. The thinking was that without a statement there isn't much to see on a profile page, so there's little point in linking to it. However it also makes the list incomplete.
</issue>
<code>
[start of liberapay/constants.py]
1 # coding: utf8
2 from __future__ import print_function, unicode_literals
3
4 from collections import namedtuple, OrderedDict
5 from datetime import date, datetime, timedelta
6 from decimal import Decimal, ROUND_UP
7 import re
8
9 from jinja2 import StrictUndefined
10 from pando.utils import utc
11
12
13 class CustomUndefined(StrictUndefined):
14 __bool__ = __nonzero__ = lambda self: False
15
16 def __str__(self):
17 try:
18 self._fail_with_undefined_error()
19 except Exception as e:
20 self._tell_sentry(e, {})
21 return ''
22
23 __unicode__ = __str__
24
25
26 def check_bits(bits):
27 assert len(set(bits)) == len(bits) # no duplicates
28 assert not [b for b in bits if '{0:b}'.format(b).count('1') != 1] # single bit
29
30
31 Event = namedtuple('Event', 'name bit title')
32
33 Fees = namedtuple('Fees', ('var', 'fix'))
34
35 StandardTip = namedtuple('StandardTip', 'label weekly monthly yearly')
36
37
38 _ = lambda a: a
39
40 ASCII_ALLOWED_IN_USERNAME = set("0123456789"
41 "abcdefghijklmnopqrstuvwxyz"
42 "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
43 "-_.")
44
45 AVATAR_QUERY = '?s=160&default=retro'
46 AVATAR_SOURCES = 'libravatar bitbucket facebook github google mastodon twitter'.split()
47
48 BIRTHDAY = date(2015, 5, 22)
49
50 D_CENT = Decimal('0.01')
51 D_INF = Decimal('inf')
52 D_UNIT = Decimal('1.00')
53 D_ZERO = Decimal('0.00')
54
55 DONATION_LIMITS_WEEKLY = (Decimal('0.01'), Decimal('100.00'))
56 DONATION_LIMITS = {
57 'weekly': DONATION_LIMITS_WEEKLY,
58 'monthly': tuple((x * Decimal(52) / Decimal(12)).quantize(D_CENT, rounding=ROUND_UP)
59 for x in DONATION_LIMITS_WEEKLY),
60 'yearly': tuple((x * Decimal(52)).quantize(D_CENT)
61 for x in DONATION_LIMITS_WEEKLY),
62 }
63 DONATION_WEEKLY_MIN, DONATION_WEEKLY_MAX = DONATION_LIMITS_WEEKLY
64
65 DOMAIN_RE = re.compile(r'''
66 ^
67 ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\.)+
68 [a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?
69 $
70 ''', re.VERBOSE)
71
72 ELSEWHERE_ACTIONS = {'connect', 'lock', 'unlock'}
73
74 EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)
75 EMAIL_RE = re.compile(r'''
76 # This is the regexp used by MangoPay (as of February 2017).
77 # It rejects some valid but exotic addresses.
78 # https://en.wikipedia.org/wiki/Email_address
79 ^
80 [a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*
81 @
82 ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\.)+[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?
83 $
84 ''', re.VERBOSE)
85
86 EPOCH = datetime(1970, 1, 1, 0, 0, 0, 0, utc)
87
88 EVENTS = [
89 Event('income', 1, _("When I receive money")),
90 Event('low_balance', 2, _("When there isn't enough money in my wallet to cover my donations")),
91 Event('withdrawal_created', 4, _("When a transfer to my bank account is initiated")),
92 Event('withdrawal_failed', 8, _("When a transfer to my bank account fails")),
93 Event('pledgee_joined', 16, _("When someone I pledge to joins Liberapay")),
94 Event('team_invite', 32, _("When someone invites me to join a team")),
95 Event('payin_bankwire_failed', 64, _("When a bank wire transfer to my Liberapay wallet fails")),
96 Event('payin_bankwire_succeeded', 128, _("When a bank wire transfer to my Liberapay wallet succeeds")),
97 Event('payin_bankwire_expired', 256, _("When a bank wire transfer to my Liberapay wallet expires")),
98 Event('payin_directdebit_failed', 512, _("When a direct debit from my bank account fails")),
99 Event('payin_directdebit_succeeded', 1024, _("When a direct debit from my bank account succeeds")),
100 ]
101 check_bits([e.bit for e in EVENTS])
102 EVENTS = OrderedDict((e.name, e) for e in EVENTS)
103 EVENTS_S = ' '.join(EVENTS.keys())
104
105 # https://www.mangopay.com/pricing/
106 FEE_PAYIN_BANK_WIRE = Fees(Decimal('0.005'), Decimal(0)) # 0.5%
107 FEE_PAYIN_CARD = Fees(Decimal('0.018'), Decimal('0.18')) # 1.8% + €0.18
108 FEE_PAYIN_DIRECT_DEBIT = Fees(Decimal(0), Decimal('0.80')) # €0.80
109 FEE_PAYOUT = Fees(Decimal(0), Decimal(0))
110 FEE_PAYOUT_OUTSIDE_SEPA = Fees(Decimal(0), Decimal('2.5'))
111 FEE_PAYOUT_WARN = Decimal('0.03') # warn user when fee exceeds 3%
112 FEE_VAT = Decimal('0.17') # 17% (Luxembourg rate)
113
114 INVOICE_DOC_MAX_SIZE = 5000000
115 INVOICE_DOCS_EXTS = ['pdf', 'jpeg', 'jpg', 'png']
116 INVOICE_DOCS_LIMIT = 10
117
118 INVOICE_NATURES = {
119 'expense': _("Expense Report"),
120 }
121
122 INVOICE_STATUSES = {
123 'pre': _("Draft"),
124 'new': _("Sent (awaiting approval)"),
125 'retracted': _("Retracted"),
126 'accepted': _("Accepted (awaiting payment)"),
127 'paid': _("Paid"),
128 'rejected': _("Rejected"),
129 }
130
131 JINJA_ENV_COMMON = dict(
132 trim_blocks=True, lstrip_blocks=True,
133 line_statement_prefix='%',
134 # undefined=CustomUndefined,
135 )
136
137 # https://docs.mangopay.com/api-references/kyc-rules/
138 KYC_DOC_MAX_SIZE = 7000000
139 KYC_DOC_MAX_SIZE_MB = int(KYC_DOC_MAX_SIZE / 1000000)
140 KYC_DOCS_EXTS = ['pdf', 'jpeg', 'jpg', 'gif', 'png']
141 KYC_DOCS_EXTS_STR = ', '.join(KYC_DOCS_EXTS)
142 KYC_INCOME_THRESHOLDS = (
143 (1, 18000),
144 (2, 30000),
145 (3, 50000),
146 (4, 80000),
147 (5, 120000),
148 (6, 120000),
149 )
150 KYC_PAYIN_YEARLY_THRESHOLD = Decimal('2500')
151 KYC_PAYOUT_YEARLY_THRESHOLD = Decimal('1000')
152
153 LAUNCH_TIME = datetime(2016, 2, 3, 12, 50, 0, 0, utc)
154
155 PARTICIPANT_KINDS = {
156 'individual': _("Individual"),
157 'organization': _("Organization"),
158 'group': _("Team"),
159 }
160
161 PASSWORD_MIN_SIZE = 8
162 PASSWORD_MAX_SIZE = 150
163
164 PAYIN_BANK_WIRE_MIN = Decimal('2.00') # fee ≈ 0.99%
165 PAYIN_BANK_WIRE_TARGET = Decimal('5.00') # fee ≈ 0.6%
166 PAYIN_CARD_MIN = Decimal("15.00") # fee ≈ 3.5%
167 PAYIN_CARD_TARGET = Decimal("92.00") # fee ≈ 2.33%
168 PAYIN_DIRECT_DEBIT_MIN = Decimal('25.00') # fee ≈ 3.6%
169 PAYIN_DIRECT_DEBIT_TARGET = Decimal('99.00') # fee ≈ 0.94%
170
171 PERIOD_CONVERSION_RATES = {
172 'weekly': Decimal(1),
173 'monthly': Decimal(12) / Decimal(52),
174 'yearly': Decimal(1) / Decimal(52),
175 }
176
177 POSTAL_ADDRESS_KEYS = (
178 'AddressLine1', 'AddressLine2', 'City', 'Region', 'PostalCode', 'Country'
179 )
180
181 PRIVACY_FIELDS = OrderedDict([
182 ('hide_giving', _("Hide total giving from others.")),
183 ('hide_receiving', _("Hide total receiving from others.")),
184 ('hide_from_search', _("Hide myself from search results on Liberapay.")),
185 ('profile_noindex', _("Tell web search engines not to index my profile.")),
186 ('hide_from_lists', _("Prevent my profile from being listed on Liberapay.")),
187 ])
188 PRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())
189
190 PRIVILEGES = dict(admin=1, run_payday=2)
191 check_bits(list(PRIVILEGES.values()))
192
193 QUARANTINE = timedelta(weeks=4)
194
195 RATE_LIMITS = {
196 'add_email.source': (5, 60*60*24), # 5 per day
197 'add_email.target': (2, 60*60*24), # 2 per day
198 'change_username': (7, 60*60*24*7), # 7 per week
199 'log-in.email': (10, 60*60*24), # 10 per day
200 'log-in.email.not-verified': (2, 60*60*24), # 2 per day
201 'log-in.email.verified': (10, 60*60*24), # 10 per day
202 'log-in.password': (3, 60*60), # 3 per hour
203 'sign-up.ip-addr': (5, 60*60), # 5 per hour per IP address
204 'sign-up.ip-net': (15, 15*60), # 15 per 15 minutes per IP network
205 'sign-up.ip-version': (15, 15*60), # 15 per 15 minutes per IP version
206 }
207
208 SEPA = set("""
209 AT BE BG CH CY CZ DE DK EE ES ES FI FR GB GI GR HR HU IE IS IT LI LT LU LV
210 MC MT NL NO PL PT RO SE SI SK
211 """.split())
212
213 SESSION = str('session') # bytes in python2, unicode in python3
214 SESSION_REFRESH = timedelta(hours=1)
215 SESSION_TIMEOUT = timedelta(hours=6)
216
217
218 def make_standard_tip(label, weekly):
219 return StandardTip(
220 label,
221 weekly,
222 weekly / PERIOD_CONVERSION_RATES['monthly'],
223 weekly / PERIOD_CONVERSION_RATES['yearly'],
224 )
225
226
227 STANDARD_TIPS = (
228 make_standard_tip(_("Symbolic"), Decimal('0.01')),
229 make_standard_tip(_("Small"), Decimal('0.25')),
230 make_standard_tip(_("Medium"), Decimal('1.00')),
231 make_standard_tip(_("Large"), Decimal('5.00')),
232 make_standard_tip(_("Maximum"), DONATION_WEEKLY_MAX),
233 )
234
235 USERNAME_MAX_SIZE = 32
236
237 del _
238
[end of liberapay/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/liberapay/constants.py b/liberapay/constants.py
--- a/liberapay/constants.py
+++ b/liberapay/constants.py
@@ -232,6 +232,8 @@
make_standard_tip(_("Maximum"), DONATION_WEEKLY_MAX),
)
+SUMMARY_MAX_SIZE = 100
+
USERNAME_MAX_SIZE = 32
del _
| {"golden_diff": "diff --git a/liberapay/constants.py b/liberapay/constants.py\n--- a/liberapay/constants.py\n+++ b/liberapay/constants.py\n@@ -232,6 +232,8 @@\n make_standard_tip(_(\"Maximum\"), DONATION_WEEKLY_MAX),\n )\n \n+SUMMARY_MAX_SIZE = 100\n+\n USERNAME_MAX_SIZE = 32\n \n del _\n", "issue": "The list of top individuals is incomplete\nWhile looking at https://liberapay.com/explore/individuals I realized that ploum isn't listed. It's because he doesn't have a profile statement. The thinking was that without a statement there isn't much to see on a profile page, so there's little point in linking to it. However it also makes the list incomplete.\nThe list of top individuals is incomplete\nWhile looking at https://liberapay.com/explore/individuals I realized that ploum isn't listed. It's because he doesn't have a profile statement. The thinking was that without a statement there isn't much to see on a profile page, so there's little point in linking to it. However it also makes the list incomplete.\n", "before_files": [{"content": "# coding: utf8\nfrom __future__ import print_function, unicode_literals\n\nfrom collections import namedtuple, OrderedDict\nfrom datetime import date, datetime, timedelta\nfrom decimal import Decimal, ROUND_UP\nimport re\n\nfrom jinja2 import StrictUndefined\nfrom pando.utils import utc\n\n\nclass CustomUndefined(StrictUndefined):\n __bool__ = __nonzero__ = lambda self: False\n\n def __str__(self):\n try:\n self._fail_with_undefined_error()\n except Exception as e:\n self._tell_sentry(e, {})\n return ''\n\n __unicode__ = __str__\n\n\ndef check_bits(bits):\n assert len(set(bits)) == len(bits) # no duplicates\n assert not [b for b in bits if '{0:b}'.format(b).count('1') != 1] # single bit\n\n\nEvent = namedtuple('Event', 'name bit title')\n\nFees = namedtuple('Fees', ('var', 'fix'))\n\nStandardTip = namedtuple('StandardTip', 'label weekly monthly yearly')\n\n\n_ = lambda a: a\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"-_.\")\n\nAVATAR_QUERY = '?s=160&default=retro'\nAVATAR_SOURCES = 'libravatar bitbucket facebook github google mastodon twitter'.split()\n\nBIRTHDAY = date(2015, 5, 22)\n\nD_CENT = Decimal('0.01')\nD_INF = Decimal('inf')\nD_UNIT = Decimal('1.00')\nD_ZERO = Decimal('0.00')\n\nDONATION_LIMITS_WEEKLY = (Decimal('0.01'), Decimal('100.00'))\nDONATION_LIMITS = {\n 'weekly': DONATION_LIMITS_WEEKLY,\n 'monthly': tuple((x * Decimal(52) / Decimal(12)).quantize(D_CENT, rounding=ROUND_UP)\n for x in DONATION_LIMITS_WEEKLY),\n 'yearly': tuple((x * Decimal(52)).quantize(D_CENT)\n for x in DONATION_LIMITS_WEEKLY),\n}\nDONATION_WEEKLY_MIN, DONATION_WEEKLY_MAX = DONATION_LIMITS_WEEKLY\n\nDOMAIN_RE = re.compile(r'''\n ^\n ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)+\n [a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\n $\n''', re.VERBOSE)\n\nELSEWHERE_ACTIONS = {'connect', 'lock', 'unlock'}\n\nEMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\nEMAIL_RE = re.compile(r'''\n # This is the regexp used by MangoPay (as of February 2017).\n # It rejects some valid but exotic addresses.\n # https://en.wikipedia.org/wiki/Email_address\n ^\n [a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*\n @\n ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)+[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\n $\n''', re.VERBOSE)\n\nEPOCH = datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n\nEVENTS = [\n Event('income', 1, _(\"When I receive money\")),\n Event('low_balance', 2, _(\"When there isn't enough money in my wallet to 
cover my donations\")),\n Event('withdrawal_created', 4, _(\"When a transfer to my bank account is initiated\")),\n Event('withdrawal_failed', 8, _(\"When a transfer to my bank account fails\")),\n Event('pledgee_joined', 16, _(\"When someone I pledge to joins Liberapay\")),\n Event('team_invite', 32, _(\"When someone invites me to join a team\")),\n Event('payin_bankwire_failed', 64, _(\"When a bank wire transfer to my Liberapay wallet fails\")),\n Event('payin_bankwire_succeeded', 128, _(\"When a bank wire transfer to my Liberapay wallet succeeds\")),\n Event('payin_bankwire_expired', 256, _(\"When a bank wire transfer to my Liberapay wallet expires\")),\n Event('payin_directdebit_failed', 512, _(\"When a direct debit from my bank account fails\")),\n Event('payin_directdebit_succeeded', 1024, _(\"When a direct debit from my bank account succeeds\")),\n]\ncheck_bits([e.bit for e in EVENTS])\nEVENTS = OrderedDict((e.name, e) for e in EVENTS)\nEVENTS_S = ' '.join(EVENTS.keys())\n\n# https://www.mangopay.com/pricing/\nFEE_PAYIN_BANK_WIRE = Fees(Decimal('0.005'), Decimal(0)) # 0.5%\nFEE_PAYIN_CARD = Fees(Decimal('0.018'), Decimal('0.18')) # 1.8% + \u20ac0.18\nFEE_PAYIN_DIRECT_DEBIT = Fees(Decimal(0), Decimal('0.80')) # \u20ac0.80\nFEE_PAYOUT = Fees(Decimal(0), Decimal(0))\nFEE_PAYOUT_OUTSIDE_SEPA = Fees(Decimal(0), Decimal('2.5'))\nFEE_PAYOUT_WARN = Decimal('0.03') # warn user when fee exceeds 3%\nFEE_VAT = Decimal('0.17') # 17% (Luxembourg rate)\n\nINVOICE_DOC_MAX_SIZE = 5000000\nINVOICE_DOCS_EXTS = ['pdf', 'jpeg', 'jpg', 'png']\nINVOICE_DOCS_LIMIT = 10\n\nINVOICE_NATURES = {\n 'expense': _(\"Expense Report\"),\n}\n\nINVOICE_STATUSES = {\n 'pre': _(\"Draft\"),\n 'new': _(\"Sent (awaiting approval)\"),\n 'retracted': _(\"Retracted\"),\n 'accepted': _(\"Accepted (awaiting payment)\"),\n 'paid': _(\"Paid\"),\n 'rejected': _(\"Rejected\"),\n}\n\nJINJA_ENV_COMMON = dict(\n trim_blocks=True, lstrip_blocks=True,\n line_statement_prefix='%',\n # undefined=CustomUndefined,\n)\n\n# https://docs.mangopay.com/api-references/kyc-rules/\nKYC_DOC_MAX_SIZE = 7000000\nKYC_DOC_MAX_SIZE_MB = int(KYC_DOC_MAX_SIZE / 1000000)\nKYC_DOCS_EXTS = ['pdf', 'jpeg', 'jpg', 'gif', 'png']\nKYC_DOCS_EXTS_STR = ', '.join(KYC_DOCS_EXTS)\nKYC_INCOME_THRESHOLDS = (\n (1, 18000),\n (2, 30000),\n (3, 50000),\n (4, 80000),\n (5, 120000),\n (6, 120000),\n)\nKYC_PAYIN_YEARLY_THRESHOLD = Decimal('2500')\nKYC_PAYOUT_YEARLY_THRESHOLD = Decimal('1000')\n\nLAUNCH_TIME = datetime(2016, 2, 3, 12, 50, 0, 0, utc)\n\nPARTICIPANT_KINDS = {\n 'individual': _(\"Individual\"),\n 'organization': _(\"Organization\"),\n 'group': _(\"Team\"),\n}\n\nPASSWORD_MIN_SIZE = 8\nPASSWORD_MAX_SIZE = 150\n\nPAYIN_BANK_WIRE_MIN = Decimal('2.00') # fee \u2248 0.99%\nPAYIN_BANK_WIRE_TARGET = Decimal('5.00') # fee \u2248 0.6%\nPAYIN_CARD_MIN = Decimal(\"15.00\") # fee \u2248 3.5%\nPAYIN_CARD_TARGET = Decimal(\"92.00\") # fee \u2248 2.33%\nPAYIN_DIRECT_DEBIT_MIN = Decimal('25.00') # fee \u2248 3.6%\nPAYIN_DIRECT_DEBIT_TARGET = Decimal('99.00') # fee \u2248 0.94%\n\nPERIOD_CONVERSION_RATES = {\n 'weekly': Decimal(1),\n 'monthly': Decimal(12) / Decimal(52),\n 'yearly': Decimal(1) / Decimal(52),\n}\n\nPOSTAL_ADDRESS_KEYS = (\n 'AddressLine1', 'AddressLine2', 'City', 'Region', 'PostalCode', 'Country'\n)\n\nPRIVACY_FIELDS = OrderedDict([\n ('hide_giving', _(\"Hide total giving from others.\")),\n ('hide_receiving', _(\"Hide total receiving from others.\")),\n ('hide_from_search', _(\"Hide myself from search results on Liberapay.\")),\n ('profile_noindex', _(\"Tell web 
search engines not to index my profile.\")),\n ('hide_from_lists', _(\"Prevent my profile from being listed on Liberapay.\")),\n])\nPRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())\n\nPRIVILEGES = dict(admin=1, run_payday=2)\ncheck_bits(list(PRIVILEGES.values()))\n\nQUARANTINE = timedelta(weeks=4)\n\nRATE_LIMITS = {\n 'add_email.source': (5, 60*60*24), # 5 per day\n 'add_email.target': (2, 60*60*24), # 2 per day\n 'change_username': (7, 60*60*24*7), # 7 per week\n 'log-in.email': (10, 60*60*24), # 10 per day\n 'log-in.email.not-verified': (2, 60*60*24), # 2 per day\n 'log-in.email.verified': (10, 60*60*24), # 10 per day\n 'log-in.password': (3, 60*60), # 3 per hour\n 'sign-up.ip-addr': (5, 60*60), # 5 per hour per IP address\n 'sign-up.ip-net': (15, 15*60), # 15 per 15 minutes per IP network\n 'sign-up.ip-version': (15, 15*60), # 15 per 15 minutes per IP version\n}\n\nSEPA = set(\"\"\"\n AT BE BG CH CY CZ DE DK EE ES ES FI FR GB GI GR HR HU IE IS IT LI LT LU LV\n MC MT NL NO PL PT RO SE SI SK\n\"\"\".split())\n\nSESSION = str('session') # bytes in python2, unicode in python3\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\n\ndef make_standard_tip(label, weekly):\n return StandardTip(\n label,\n weekly,\n weekly / PERIOD_CONVERSION_RATES['monthly'],\n weekly / PERIOD_CONVERSION_RATES['yearly'],\n )\n\n\nSTANDARD_TIPS = (\n make_standard_tip(_(\"Symbolic\"), Decimal('0.01')),\n make_standard_tip(_(\"Small\"), Decimal('0.25')),\n make_standard_tip(_(\"Medium\"), Decimal('1.00')),\n make_standard_tip(_(\"Large\"), Decimal('5.00')),\n make_standard_tip(_(\"Maximum\"), DONATION_WEEKLY_MAX),\n)\n\nUSERNAME_MAX_SIZE = 32\n\ndel _\n", "path": "liberapay/constants.py"}]} | 3,900 | 87 |
gh_patches_debug_39810 | rasdani/github-patches | git_diff | ray-project__ray-5999 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tune doesn't show timesteps anymore
This makes it not very useful for monitoring RL runs:
```
+--------------------------+----------+----------+-----------+--------+------------------+----------+
| Trial name | ID | status | loc | iter | total time (s) | reward |
|--------------------------+----------+----------+-----------+--------+------------------+----------|
| SAC_Pendulum-v0_79dcd0fe | 79dcd0fe | RUNNING | pid=11031 | 66 | 163.499 | -1105.85 |
+--------------------------+----------+----------+-----------+--------+------------------+----------+
```
cc @richardliaw
</issue>
<code>
[start of python/ray/tune/result.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import os
6
7 # yapf: disable
8 # __sphinx_doc_begin__
9 # (Optional/Auto-filled) training is terminated. Filled only if not provided.
10 DONE = "done"
11
12 # (Optional) Enum for user controlled checkpoint
13 SHOULD_CHECKPOINT = "should_checkpoint"
14
15 # (Auto-filled) The hostname of the machine hosting the training process.
16 HOSTNAME = "hostname"
17
18 # (Auto-filled) The auto-assigned id of the trial.
19 TRIAL_ID = "trial_id"
20
21 # (Auto-filled) The auto-assigned id of the trial.
22 EXPERIMENT_TAG = "experiment_tag"
23
24 # (Auto-filled) The node ip of the machine hosting the training process.
25 NODE_IP = "node_ip"
26
27 # (Auto-filled) The pid of the training process.
28 PID = "pid"
29
30 # (Optional) Mean reward for current training iteration
31 EPISODE_REWARD_MEAN = "episode_reward_mean"
32
33 # (Optional) Mean loss for training iteration
34 MEAN_LOSS = "mean_loss"
35
36 # (Optional) Mean accuracy for training iteration
37 MEAN_ACCURACY = "mean_accuracy"
38
39 # Number of episodes in this iteration.
40 EPISODES_THIS_ITER = "episodes_this_iter"
41
42 # (Optional/Auto-filled) Accumulated number of episodes for this trial.
43 EPISODES_TOTAL = "episodes_total"
44
45 # Number of timesteps in this iteration.
46 TIMESTEPS_THIS_ITER = "timesteps_this_iter"
47
48 # (Auto-filled) Accumulated number of timesteps for this entire trial.
49 TIMESTEPS_TOTAL = "timesteps_total"
50
51 # (Auto-filled) Time in seconds this iteration took to run.
52 # This may be overriden to override the system-computed time difference.
53 TIME_THIS_ITER_S = "time_this_iter_s"
54
55 # (Auto-filled) Accumulated time in seconds for this entire trial.
56 TIME_TOTAL_S = "time_total_s"
57
58 # (Auto-filled) The index of this training iteration.
59 TRAINING_ITERATION = "training_iteration"
60 # __sphinx_doc_end__
61 # yapf: enable
62
63 DEFAULT_EXPERIMENT_INFO_KEYS = ("trainable_name", EXPERIMENT_TAG, TRIAL_ID)
64
65 DEFAULT_RESULT_KEYS = (TRAINING_ITERATION, TIME_TOTAL_S, MEAN_ACCURACY,
66 MEAN_LOSS)
67
68 # __duplicate__ is a magic keyword used internally to
69 # avoid double-logging results when using the Function API.
70 RESULT_DUPLICATE = "__duplicate__"
71
72 # Where Tune writes result files by default
73 DEFAULT_RESULTS_DIR = (os.environ.get("TUNE_RESULT_DIR")
74 or os.path.expanduser("~/ray_results"))
75
76 # Meta file about status under each experiment directory, can be
77 # parsed by automlboard if exists.
78 JOB_META_FILE = "job_status.json"
79
80 # Meta file about status under each trial directory, can be parsed
81 # by automlboard if exists.
82 EXPR_META_FILE = "trial_status.json"
83
84 # File that stores parameters of the trial.
85 EXPR_PARAM_FILE = "params.json"
86
87 # Pickle File that stores parameters of the trial.
88 EXPR_PARAM_PICKLE_FILE = "params.pkl"
89
90 # File that stores the progress of the trial.
91 EXPR_PROGRESS_FILE = "progress.csv"
92
93 # File that stores results of the trial.
94 EXPR_RESULT_FILE = "result.json"
95
96 # Config prefix when using Analysis.
97 CONFIG_PREFIX = "config/"
98
[end of python/ray/tune/result.py]
[start of python/ray/tune/progress_reporter.py]
1 from __future__ import print_function
2
3 import os
4
5 from ray.tune.result import (DEFAULT_RESULT_KEYS, CONFIG_PREFIX, PID,
6 EPISODE_REWARD_MEAN, MEAN_ACCURACY, MEAN_LOSS,
7 HOSTNAME, TRAINING_ITERATION, TIME_TOTAL_S)
8 from ray.tune.util import flatten_dict
9
10 try:
11 from tabulate import tabulate
12 except ImportError:
13 raise ImportError("ray.tune in ray > 0.7.5 requires 'tabulate'. "
14 "Please re-run 'pip install ray[tune]' or "
15 "'pip install ray[rllib]'.")
16
17 DEFAULT_PROGRESS_KEYS = DEFAULT_RESULT_KEYS + (EPISODE_REWARD_MEAN, )
18 # Truncated representations of column names (to accommodate small screens).
19 REPORTED_REPRESENTATIONS = {
20 EPISODE_REWARD_MEAN: "reward",
21 MEAN_ACCURACY: "acc",
22 MEAN_LOSS: "loss",
23 TIME_TOTAL_S: "total time (s)",
24 TRAINING_ITERATION: "iter",
25 }
26
27
28 class ProgressReporter(object):
29 def report(self, trial_runner):
30 """Reports progress across all trials of the trial runner.
31
32 Args:
33 trial_runner: Trial runner to report on.
34 """
35 raise NotImplementedError
36
37
38 class JupyterNotebookReporter(ProgressReporter):
39 def __init__(self, overwrite):
40 """Initializes a new JupyterNotebookReporter.
41
42 Args:
43 overwrite (bool): Flag for overwriting the last reported progress.
44 """
45 self.overwrite = overwrite
46
47 def report(self, trial_runner):
48 delim = "<br>"
49 messages = [
50 "== Status ==",
51 memory_debug_str(),
52 trial_runner.debug_string(delim=delim),
53 trial_progress_str(trial_runner.get_trials(), fmt="html")
54 ]
55 from IPython.display import clear_output
56 from IPython.core.display import display, HTML
57 if self.overwrite:
58 clear_output(wait=True)
59 display(HTML(delim.join(messages) + delim))
60
61
62 class CLIReporter(ProgressReporter):
63 def report(self, trial_runner):
64 messages = [
65 "== Status ==",
66 memory_debug_str(),
67 trial_runner.debug_string(),
68 trial_progress_str(trial_runner.get_trials())
69 ]
70 print("\n".join(messages) + "\n")
71
72
73 def memory_debug_str():
74 try:
75 import psutil
76 total_gb = psutil.virtual_memory().total / (1024**3)
77 used_gb = total_gb - psutil.virtual_memory().available / (1024**3)
78 if used_gb > total_gb * 0.9:
79 warn = (": ***LOW MEMORY*** less than 10% of the memory on "
80 "this node is available for use. This can cause "
81 "unexpected crashes. Consider "
82 "reducing the memory used by your application "
83 "or reducing the Ray object store size by setting "
84 "`object_store_memory` when calling `ray.init`.")
85 else:
86 warn = ""
87 return "Memory usage on this node: {}/{} GiB{}".format(
88 round(used_gb, 1), round(total_gb, 1), warn)
89 except ImportError:
90 return ("Unknown memory usage. Please run `pip install psutil` "
91 "(or ray[debug]) to resolve)")
92
93
94 def trial_progress_str(trials, metrics=None, fmt="psql", max_rows=100):
95 """Returns a human readable message for printing to the console.
96
97 This contains a table where each row represents a trial, its parameters
98 and the current values of its metrics.
99
100 Args:
101 trials (List[Trial]): List of trials to get progress string for.
102 metrics (List[str]): Names of metrics to include. Defaults to
103 metrics defined in DEFAULT_RESULT_KEYS.
104 fmt (str): Output format (see tablefmt in tabulate API).
105 max_rows (int): Maximum number of rows in the trial table.
106 """
107 messages = []
108 delim = "<br>" if fmt == "html" else "\n"
109 if len(trials) < 1:
110 return delim.join(messages)
111
112 num_trials = len(trials)
113 trials_per_state = {}
114 for t in trials:
115 trials_per_state[t.status] = trials_per_state.get(t.status, 0) + 1
116 messages.append("Number of trials: {} ({})".format(num_trials,
117 trials_per_state))
118 for local_dir in sorted({t.local_dir for t in trials}):
119 messages.append("Result logdir: {}".format(local_dir))
120
121 if num_trials > max_rows:
122 overflow = num_trials - max_rows
123 # TODO(ujvl): suggestion for users to view more rows.
124 messages.append("Table truncated to {} rows ({} overflow).".format(
125 max_rows, overflow))
126
127 # Pre-process trials to figure out what columns to show.
128 keys = list(metrics or DEFAULT_PROGRESS_KEYS)
129 keys = [k for k in keys if any(t.last_result.get(k) for t in trials)]
130 has_failed = any(t.error_file for t in trials)
131 # Build rows.
132 trial_table = []
133 params = list(set().union(*[t.evaluated_params for t in trials]))
134 for trial in trials[:min(num_trials, max_rows)]:
135 trial_table.append(_get_trial_info(trial, params, keys, has_failed))
136 # Parse columns.
137 parsed_columns = [REPORTED_REPRESENTATIONS.get(k, k) for k in keys]
138 columns = ["Trial name", "ID", "status", "loc"]
139 columns += ["failures", "error file"] if has_failed else []
140 columns += params + parsed_columns
141 messages.append(
142 tabulate(trial_table, headers=columns, tablefmt=fmt, showindex=False))
143 return delim.join(messages)
144
145
146 def _get_trial_info(trial, parameters, metrics, include_error_data=False):
147 """Returns the following information about a trial:
148
149 name | ID | status | loc | # failures | error_file | params... | metrics...
150
151 Args:
152 trial (Trial): Trial to get information for.
153 parameters (List[str]): Names of trial parameters to include.
154 metrics (List[str]): Names of metrics to include.
155 include_error_data (bool): Include error file and # of failures.
156 """
157 result = flatten_dict(trial.last_result)
158 trial_info = [str(trial), trial.trial_id, trial.status]
159 trial_info += [_location_str(result.get(HOSTNAME), result.get(PID))]
160 if include_error_data:
161 # TODO(ujvl): File path is too long to display in a single row.
162 trial_info += [trial.num_failures, trial.error_file]
163 trial_info += [result.get(CONFIG_PREFIX + param) for param in parameters]
164 trial_info += [result.get(metric) for metric in metrics]
165 return trial_info
166
167
168 def _location_str(hostname, pid):
169 if not pid:
170 return ""
171 elif hostname == os.uname()[1]:
172 return "pid={}".format(pid)
173 else:
174 return "{}:{}".format(hostname, pid)
175
[end of python/ray/tune/progress_reporter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/tune/progress_reporter.py b/python/ray/tune/progress_reporter.py
--- a/python/ray/tune/progress_reporter.py
+++ b/python/ray/tune/progress_reporter.py
@@ -4,7 +4,8 @@
from ray.tune.result import (DEFAULT_RESULT_KEYS, CONFIG_PREFIX, PID,
EPISODE_REWARD_MEAN, MEAN_ACCURACY, MEAN_LOSS,
- HOSTNAME, TRAINING_ITERATION, TIME_TOTAL_S)
+ HOSTNAME, TRAINING_ITERATION, TIME_TOTAL_S,
+ TIMESTEPS_TOTAL)
from ray.tune.util import flatten_dict
try:
@@ -21,6 +22,7 @@
MEAN_ACCURACY: "acc",
MEAN_LOSS: "loss",
TIME_TOTAL_S: "total time (s)",
+ TIMESTEPS_TOTAL: "timesteps",
TRAINING_ITERATION: "iter",
}
@@ -135,7 +137,7 @@
trial_table.append(_get_trial_info(trial, params, keys, has_failed))
# Parse columns.
parsed_columns = [REPORTED_REPRESENTATIONS.get(k, k) for k in keys]
- columns = ["Trial name", "ID", "status", "loc"]
+ columns = ["Trial name", "status", "loc"]
columns += ["failures", "error file"] if has_failed else []
columns += params + parsed_columns
messages.append(
@@ -146,7 +148,7 @@
def _get_trial_info(trial, parameters, metrics, include_error_data=False):
"""Returns the following information about a trial:
- name | ID | status | loc | # failures | error_file | params... | metrics...
+ name | status | loc | # failures | error_file | params... | metrics...
Args:
trial (Trial): Trial to get information for.
@@ -155,7 +157,7 @@
include_error_data (bool): Include error file and # of failures.
"""
result = flatten_dict(trial.last_result)
- trial_info = [str(trial), trial.trial_id, trial.status]
+ trial_info = [str(trial), trial.status]
trial_info += [_location_str(result.get(HOSTNAME), result.get(PID))]
if include_error_data:
# TODO(ujvl): File path is too long to display in a single row.
diff --git a/python/ray/tune/result.py b/python/ray/tune/result.py
--- a/python/ray/tune/result.py
+++ b/python/ray/tune/result.py
@@ -62,8 +62,8 @@
DEFAULT_EXPERIMENT_INFO_KEYS = ("trainable_name", EXPERIMENT_TAG, TRIAL_ID)
-DEFAULT_RESULT_KEYS = (TRAINING_ITERATION, TIME_TOTAL_S, MEAN_ACCURACY,
- MEAN_LOSS)
+DEFAULT_RESULT_KEYS = (TRAINING_ITERATION, TIME_TOTAL_S, TIMESTEPS_TOTAL,
+ MEAN_ACCURACY, MEAN_LOSS)
# __duplicate__ is a magic keyword used internally to
# avoid double-logging results when using the Function API.
| {"golden_diff": "diff --git a/python/ray/tune/progress_reporter.py b/python/ray/tune/progress_reporter.py\n--- a/python/ray/tune/progress_reporter.py\n+++ b/python/ray/tune/progress_reporter.py\n@@ -4,7 +4,8 @@\n \n from ray.tune.result import (DEFAULT_RESULT_KEYS, CONFIG_PREFIX, PID,\n EPISODE_REWARD_MEAN, MEAN_ACCURACY, MEAN_LOSS,\n- HOSTNAME, TRAINING_ITERATION, TIME_TOTAL_S)\n+ HOSTNAME, TRAINING_ITERATION, TIME_TOTAL_S,\n+ TIMESTEPS_TOTAL)\n from ray.tune.util import flatten_dict\n \n try:\n@@ -21,6 +22,7 @@\n MEAN_ACCURACY: \"acc\",\n MEAN_LOSS: \"loss\",\n TIME_TOTAL_S: \"total time (s)\",\n+ TIMESTEPS_TOTAL: \"timesteps\",\n TRAINING_ITERATION: \"iter\",\n }\n \n@@ -135,7 +137,7 @@\n trial_table.append(_get_trial_info(trial, params, keys, has_failed))\n # Parse columns.\n parsed_columns = [REPORTED_REPRESENTATIONS.get(k, k) for k in keys]\n- columns = [\"Trial name\", \"ID\", \"status\", \"loc\"]\n+ columns = [\"Trial name\", \"status\", \"loc\"]\n columns += [\"failures\", \"error file\"] if has_failed else []\n columns += params + parsed_columns\n messages.append(\n@@ -146,7 +148,7 @@\n def _get_trial_info(trial, parameters, metrics, include_error_data=False):\n \"\"\"Returns the following information about a trial:\n \n- name | ID | status | loc | # failures | error_file | params... | metrics...\n+ name | status | loc | # failures | error_file | params... | metrics...\n \n Args:\n trial (Trial): Trial to get information for.\n@@ -155,7 +157,7 @@\n include_error_data (bool): Include error file and # of failures.\n \"\"\"\n result = flatten_dict(trial.last_result)\n- trial_info = [str(trial), trial.trial_id, trial.status]\n+ trial_info = [str(trial), trial.status]\n trial_info += [_location_str(result.get(HOSTNAME), result.get(PID))]\n if include_error_data:\n # TODO(ujvl): File path is too long to display in a single row.\ndiff --git a/python/ray/tune/result.py b/python/ray/tune/result.py\n--- a/python/ray/tune/result.py\n+++ b/python/ray/tune/result.py\n@@ -62,8 +62,8 @@\n \n DEFAULT_EXPERIMENT_INFO_KEYS = (\"trainable_name\", EXPERIMENT_TAG, TRIAL_ID)\n \n-DEFAULT_RESULT_KEYS = (TRAINING_ITERATION, TIME_TOTAL_S, MEAN_ACCURACY,\n- MEAN_LOSS)\n+DEFAULT_RESULT_KEYS = (TRAINING_ITERATION, TIME_TOTAL_S, TIMESTEPS_TOTAL,\n+ MEAN_ACCURACY, MEAN_LOSS)\n \n # __duplicate__ is a magic keyword used internally to\n # avoid double-logging results when using the Function API.\n", "issue": "Tune doesn't show timesteps anymore\nThis makes it not very useful for monitoring RL runs:\r\n```\r\n+--------------------------+----------+----------+-----------+--------+------------------+----------+\r\n| Trial name | ID | status | loc | iter | total time (s) | reward |\r\n|--------------------------+----------+----------+-----------+--------+------------------+----------|\r\n| SAC_Pendulum-v0_79dcd0fe | 79dcd0fe | RUNNING | pid=11031 | 66 | 163.499 | -1105.85 |\r\n+--------------------------+----------+----------+-----------+--------+------------------+----------+\r\n```\r\n\r\ncc @richardliaw \n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\n# yapf: disable\n# __sphinx_doc_begin__\n# (Optional/Auto-filled) training is terminated. 
Filled only if not provided.\nDONE = \"done\"\n\n# (Optional) Enum for user controlled checkpoint\nSHOULD_CHECKPOINT = \"should_checkpoint\"\n\n# (Auto-filled) The hostname of the machine hosting the training process.\nHOSTNAME = \"hostname\"\n\n# (Auto-filled) The auto-assigned id of the trial.\nTRIAL_ID = \"trial_id\"\n\n# (Auto-filled) The auto-assigned id of the trial.\nEXPERIMENT_TAG = \"experiment_tag\"\n\n# (Auto-filled) The node ip of the machine hosting the training process.\nNODE_IP = \"node_ip\"\n\n# (Auto-filled) The pid of the training process.\nPID = \"pid\"\n\n# (Optional) Mean reward for current training iteration\nEPISODE_REWARD_MEAN = \"episode_reward_mean\"\n\n# (Optional) Mean loss for training iteration\nMEAN_LOSS = \"mean_loss\"\n\n# (Optional) Mean accuracy for training iteration\nMEAN_ACCURACY = \"mean_accuracy\"\n\n# Number of episodes in this iteration.\nEPISODES_THIS_ITER = \"episodes_this_iter\"\n\n# (Optional/Auto-filled) Accumulated number of episodes for this trial.\nEPISODES_TOTAL = \"episodes_total\"\n\n# Number of timesteps in this iteration.\nTIMESTEPS_THIS_ITER = \"timesteps_this_iter\"\n\n# (Auto-filled) Accumulated number of timesteps for this entire trial.\nTIMESTEPS_TOTAL = \"timesteps_total\"\n\n# (Auto-filled) Time in seconds this iteration took to run.\n# This may be overriden to override the system-computed time difference.\nTIME_THIS_ITER_S = \"time_this_iter_s\"\n\n# (Auto-filled) Accumulated time in seconds for this entire trial.\nTIME_TOTAL_S = \"time_total_s\"\n\n# (Auto-filled) The index of this training iteration.\nTRAINING_ITERATION = \"training_iteration\"\n# __sphinx_doc_end__\n# yapf: enable\n\nDEFAULT_EXPERIMENT_INFO_KEYS = (\"trainable_name\", EXPERIMENT_TAG, TRIAL_ID)\n\nDEFAULT_RESULT_KEYS = (TRAINING_ITERATION, TIME_TOTAL_S, MEAN_ACCURACY,\n MEAN_LOSS)\n\n# __duplicate__ is a magic keyword used internally to\n# avoid double-logging results when using the Function API.\nRESULT_DUPLICATE = \"__duplicate__\"\n\n# Where Tune writes result files by default\nDEFAULT_RESULTS_DIR = (os.environ.get(\"TUNE_RESULT_DIR\")\n or os.path.expanduser(\"~/ray_results\"))\n\n# Meta file about status under each experiment directory, can be\n# parsed by automlboard if exists.\nJOB_META_FILE = \"job_status.json\"\n\n# Meta file about status under each trial directory, can be parsed\n# by automlboard if exists.\nEXPR_META_FILE = \"trial_status.json\"\n\n# File that stores parameters of the trial.\nEXPR_PARAM_FILE = \"params.json\"\n\n# Pickle File that stores parameters of the trial.\nEXPR_PARAM_PICKLE_FILE = \"params.pkl\"\n\n# File that stores the progress of the trial.\nEXPR_PROGRESS_FILE = \"progress.csv\"\n\n# File that stores results of the trial.\nEXPR_RESULT_FILE = \"result.json\"\n\n# Config prefix when using Analysis.\nCONFIG_PREFIX = \"config/\"\n", "path": "python/ray/tune/result.py"}, {"content": "from __future__ import print_function\n\nimport os\n\nfrom ray.tune.result import (DEFAULT_RESULT_KEYS, CONFIG_PREFIX, PID,\n EPISODE_REWARD_MEAN, MEAN_ACCURACY, MEAN_LOSS,\n HOSTNAME, TRAINING_ITERATION, TIME_TOTAL_S)\nfrom ray.tune.util import flatten_dict\n\ntry:\n from tabulate import tabulate\nexcept ImportError:\n raise ImportError(\"ray.tune in ray > 0.7.5 requires 'tabulate'. 
\"\n \"Please re-run 'pip install ray[tune]' or \"\n \"'pip install ray[rllib]'.\")\n\nDEFAULT_PROGRESS_KEYS = DEFAULT_RESULT_KEYS + (EPISODE_REWARD_MEAN, )\n# Truncated representations of column names (to accommodate small screens).\nREPORTED_REPRESENTATIONS = {\n EPISODE_REWARD_MEAN: \"reward\",\n MEAN_ACCURACY: \"acc\",\n MEAN_LOSS: \"loss\",\n TIME_TOTAL_S: \"total time (s)\",\n TRAINING_ITERATION: \"iter\",\n}\n\n\nclass ProgressReporter(object):\n def report(self, trial_runner):\n \"\"\"Reports progress across all trials of the trial runner.\n\n Args:\n trial_runner: Trial runner to report on.\n \"\"\"\n raise NotImplementedError\n\n\nclass JupyterNotebookReporter(ProgressReporter):\n def __init__(self, overwrite):\n \"\"\"Initializes a new JupyterNotebookReporter.\n\n Args:\n overwrite (bool): Flag for overwriting the last reported progress.\n \"\"\"\n self.overwrite = overwrite\n\n def report(self, trial_runner):\n delim = \"<br>\"\n messages = [\n \"== Status ==\",\n memory_debug_str(),\n trial_runner.debug_string(delim=delim),\n trial_progress_str(trial_runner.get_trials(), fmt=\"html\")\n ]\n from IPython.display import clear_output\n from IPython.core.display import display, HTML\n if self.overwrite:\n clear_output(wait=True)\n display(HTML(delim.join(messages) + delim))\n\n\nclass CLIReporter(ProgressReporter):\n def report(self, trial_runner):\n messages = [\n \"== Status ==\",\n memory_debug_str(),\n trial_runner.debug_string(),\n trial_progress_str(trial_runner.get_trials())\n ]\n print(\"\\n\".join(messages) + \"\\n\")\n\n\ndef memory_debug_str():\n try:\n import psutil\n total_gb = psutil.virtual_memory().total / (1024**3)\n used_gb = total_gb - psutil.virtual_memory().available / (1024**3)\n if used_gb > total_gb * 0.9:\n warn = (\": ***LOW MEMORY*** less than 10% of the memory on \"\n \"this node is available for use. This can cause \"\n \"unexpected crashes. Consider \"\n \"reducing the memory used by your application \"\n \"or reducing the Ray object store size by setting \"\n \"`object_store_memory` when calling `ray.init`.\")\n else:\n warn = \"\"\n return \"Memory usage on this node: {}/{} GiB{}\".format(\n round(used_gb, 1), round(total_gb, 1), warn)\n except ImportError:\n return (\"Unknown memory usage. Please run `pip install psutil` \"\n \"(or ray[debug]) to resolve)\")\n\n\ndef trial_progress_str(trials, metrics=None, fmt=\"psql\", max_rows=100):\n \"\"\"Returns a human readable message for printing to the console.\n\n This contains a table where each row represents a trial, its parameters\n and the current values of its metrics.\n\n Args:\n trials (List[Trial]): List of trials to get progress string for.\n metrics (List[str]): Names of metrics to include. 
Defaults to\n metrics defined in DEFAULT_RESULT_KEYS.\n fmt (str): Output format (see tablefmt in tabulate API).\n max_rows (int): Maximum number of rows in the trial table.\n \"\"\"\n messages = []\n delim = \"<br>\" if fmt == \"html\" else \"\\n\"\n if len(trials) < 1:\n return delim.join(messages)\n\n num_trials = len(trials)\n trials_per_state = {}\n for t in trials:\n trials_per_state[t.status] = trials_per_state.get(t.status, 0) + 1\n messages.append(\"Number of trials: {} ({})\".format(num_trials,\n trials_per_state))\n for local_dir in sorted({t.local_dir for t in trials}):\n messages.append(\"Result logdir: {}\".format(local_dir))\n\n if num_trials > max_rows:\n overflow = num_trials - max_rows\n # TODO(ujvl): suggestion for users to view more rows.\n messages.append(\"Table truncated to {} rows ({} overflow).\".format(\n max_rows, overflow))\n\n # Pre-process trials to figure out what columns to show.\n keys = list(metrics or DEFAULT_PROGRESS_KEYS)\n keys = [k for k in keys if any(t.last_result.get(k) for t in trials)]\n has_failed = any(t.error_file for t in trials)\n # Build rows.\n trial_table = []\n params = list(set().union(*[t.evaluated_params for t in trials]))\n for trial in trials[:min(num_trials, max_rows)]:\n trial_table.append(_get_trial_info(trial, params, keys, has_failed))\n # Parse columns.\n parsed_columns = [REPORTED_REPRESENTATIONS.get(k, k) for k in keys]\n columns = [\"Trial name\", \"ID\", \"status\", \"loc\"]\n columns += [\"failures\", \"error file\"] if has_failed else []\n columns += params + parsed_columns\n messages.append(\n tabulate(trial_table, headers=columns, tablefmt=fmt, showindex=False))\n return delim.join(messages)\n\n\ndef _get_trial_info(trial, parameters, metrics, include_error_data=False):\n \"\"\"Returns the following information about a trial:\n\n name | ID | status | loc | # failures | error_file | params... | metrics...\n\n Args:\n trial (Trial): Trial to get information for.\n parameters (List[str]): Names of trial parameters to include.\n metrics (List[str]): Names of metrics to include.\n include_error_data (bool): Include error file and # of failures.\n \"\"\"\n result = flatten_dict(trial.last_result)\n trial_info = [str(trial), trial.trial_id, trial.status]\n trial_info += [_location_str(result.get(HOSTNAME), result.get(PID))]\n if include_error_data:\n # TODO(ujvl): File path is too long to display in a single row.\n trial_info += [trial.num_failures, trial.error_file]\n trial_info += [result.get(CONFIG_PREFIX + param) for param in parameters]\n trial_info += [result.get(metric) for metric in metrics]\n return trial_info\n\n\ndef _location_str(hostname, pid):\n if not pid:\n return \"\"\n elif hostname == os.uname()[1]:\n return \"pid={}\".format(pid)\n else:\n return \"{}:{}\".format(hostname, pid)\n", "path": "python/ray/tune/progress_reporter.py"}]} | 3,586 | 696 |
gh_patches_debug_24637 | rasdani/github-patches | git_diff | modin-project__modin-2950 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `show_history` option to Spreadsheet API
Changes were made to the `modin_spreadsheet` API `show_grid`. Make corresponding changes on Spreadsheet API `from_dataframe`.
</issue>
<code>
[start of modin/spreadsheet/general.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 from .. import pandas as pd
15 from modin_spreadsheet import show_grid, SpreadsheetWidget
16
17
18 def from_dataframe(
19 dataframe,
20 show_toolbar=None,
21 precision=None,
22 grid_options=None,
23 column_options=None,
24 column_definitions=None,
25 row_edit_callback=None,
26 ):
27 """
28 Renders a DataFrame or Series as an interactive spreadsheet, represented by
29 an instance of the ``SpreadsheetWidget`` class. The ``SpreadsheetWidget`` instance
30 is constructed using the options passed in to this function. The
31 ``dataframe`` argument to this function is used as the ``df`` kwarg in
32 call to the SpreadsheetWidget constructor, and the rest of the parameters
33 are passed through as is.
34
35 If the ``dataframe`` argument is a Series, it will be converted to a
36 DataFrame before being passed in to the SpreadsheetWidget constructor as the
37 ``df`` kwarg.
38
39 :rtype: SpreadsheetWidget
40
41 Parameters
42 ----------
43 dataframe : DataFrame
44 The DataFrame that will be displayed by this instance of
45 SpreadsheetWidget.
46 grid_options : dict
47 Options to use when creating the SlickGrid control (i.e. the
48 interactive grid). See the Notes section below for more information
49 on the available options, as well as the default options that this
50 widget uses.
51 precision : integer
52 The number of digits of precision to display for floating-point
53 values. If unset, we use the value of
54 `pandas.get_option('display.precision')`.
55 show_toolbar : bool
56 Whether to show a toolbar with options for adding/removing rows.
57 Adding/removing rows is an experimental feature which only works
58 with DataFrames that have an integer index.
59 column_options : dict
60 Column options that are to be applied to every column. See the
61 Notes section below for more information on the available options,
62 as well as the default options that this widget uses.
63 column_definitions : dict
64 Column options that are to be applied to individual
65 columns. The keys of the dict should be the column names, and each
66 value should be the column options for a particular column,
67 represented as a dict. The available options for each column are the
68 same options that are available to be set for all columns via the
69 ``column_options`` parameter. See the Notes section below for more
70 information on those options.
71 row_edit_callback : callable
72 A callable that is called to determine whether a particular row
73 should be editable or not. Its signature should be
74 ``callable(row)``, where ``row`` is a dictionary which contains a
75 particular row's values, keyed by column name. The callback should
76 return True if the provided row should be editable, and False
77 otherwise.
78
79
80 Notes
81 -----
82 The following dictionary is used for ``grid_options`` if none are
83 provided explicitly::
84
85 {
86 # SlickGrid options
87 'fullWidthRows': True,
88 'syncColumnCellResize': True,
89 'forceFitColumns': False,
90 'defaultColumnWidth': 150,
91 'rowHeight': 28,
92 'enableColumnReorder': False,
93 'enableTextSelectionOnCells': True,
94 'editable': True,
95 'autoEdit': False,
96 'explicitInitialization': True,
97
98 # Modin-spreadsheet options
99 'maxVisibleRows': 15,
100 'minVisibleRows': 8,
101 'sortable': True,
102 'filterable': True,
103 'highlightSelectedCell': False,
104 'highlightSelectedRow': True
105 }
106
107 The first group of options are SlickGrid "grid options" which are
108 described in the `SlickGrid documentation
109 <https://github.com/mleibman/SlickGrid/wiki/Grid-Options>`_.
110
111 The second group of option are options that were added specifically
112 for modin-spreadsheet and therefore are not documented in the SlickGrid documentation.
113 The following bullet points describe these options.
114
115 * **maxVisibleRows** The maximum number of rows that modin-spreadsheet will show.
116 * **minVisibleRows** The minimum number of rows that modin-spreadsheet will show
117 * **sortable** Whether the modin-spreadsheet instance will allow the user to sort
118 columns by clicking the column headers. When this is set to ``False``,
119 nothing will happen when users click the column headers.
120 * **filterable** Whether the modin-spreadsheet instance will allow the user to filter
121 the grid. When this is set to ``False`` the filter icons won't be shown
122 for any columns.
123 * **highlightSelectedCell** If you set this to True, the selected cell
124 will be given a light blue border.
125 * **highlightSelectedRow** If you set this to False, the light blue
126 background that's shown by default for selected rows will be hidden.
127
128 The following dictionary is used for ``column_options`` if none are
129 provided explicitly::
130
131 {
132 # SlickGrid column options
133 'defaultSortAsc': True,
134 'maxWidth': None,
135 'minWidth': 30,
136 'resizable': True,
137 'sortable': True,
138 'toolTip': "",
139 'width': None
140
141 # Modin-spreadsheet column options
142 'editable': True,
143 }
144
145 The first group of options are SlickGrid "column options" which are
146 described in the `SlickGrid documentation
147 <https://github.com/mleibman/SlickGrid/wiki/Column-Options>`_.
148
149 The ``editable`` option was added specifically for modin-spreadsheet and therefore is
150 not documented in the SlickGrid documentation. This option specifies
151 whether a column should be editable or not.
152
153 See Also
154 --------
155 set_defaults : Permanently set global defaults for the parameters
156 of ``show_grid``, with the exception of the ``dataframe``
157 and ``column_definitions`` parameters, since those
158 depend on the particular set of data being shown by an
159 instance, and therefore aren't parameters we would want
160 to set for all SpreadsheetWidget instances.
161 set_grid_option : Permanently set global defaults for individual
162 grid options. Does so by changing the defaults
163 that the ``show_grid`` method uses for the
164 ``grid_options`` parameter.
165 SpreadsheetWidget : The widget class that is instantiated and returned by this
166 method.
167
168 """
169 if not isinstance(dataframe, pd.DataFrame):
170 raise TypeError("dataframe must be modin.DataFrame, not %s" % type(dataframe))
171 return show_grid(
172 dataframe,
173 show_toolbar,
174 precision,
175 grid_options,
176 column_options,
177 column_definitions,
178 row_edit_callback,
179 )
180
181
182 def to_dataframe(spreadsheet):
183 """
184 Get a copy of the DataFrame that reflects the current state of the ``spreadsheet`` SpreadsheetWidget instance UI.
185 This includes any sorting or filtering changes, as well as edits
186 that have been made by double clicking cells.
187
188 :rtype: DataFrame
189
190 Parameters
191 ----------
192 spreadsheet : SpreadsheetWidget
193 The SpreadsheetWidget instance that DataFrame that will be displayed by this instance of
194 SpreadsheetWidget.
195 """
196 if not isinstance(spreadsheet, SpreadsheetWidget):
197 raise TypeError(
198 "spreadsheet must be modin_spreadsheet.SpreadsheetWidget, not %s"
199 % type(spreadsheet)
200 )
201 return spreadsheet.get_changed_df()
202
[end of modin/spreadsheet/general.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/spreadsheet/general.py b/modin/spreadsheet/general.py
--- a/modin/spreadsheet/general.py
+++ b/modin/spreadsheet/general.py
@@ -18,6 +18,7 @@
def from_dataframe(
dataframe,
show_toolbar=None,
+ show_history=None,
precision=None,
grid_options=None,
column_options=None,
@@ -56,6 +57,9 @@
Whether to show a toolbar with options for adding/removing rows.
Adding/removing rows is an experimental feature which only works
with DataFrames that have an integer index.
+ show_history : bool
+ Whether to show the cell containing the spreadsheet transformation
+ history.
column_options : dict
Column options that are to be applied to every column. See the
Notes section below for more information on the available options,
@@ -171,6 +175,7 @@
return show_grid(
dataframe,
show_toolbar,
+ show_history,
precision,
grid_options,
column_options,
| {"golden_diff": "diff --git a/modin/spreadsheet/general.py b/modin/spreadsheet/general.py\n--- a/modin/spreadsheet/general.py\n+++ b/modin/spreadsheet/general.py\n@@ -18,6 +18,7 @@\n def from_dataframe(\n dataframe,\n show_toolbar=None,\n+ show_history=None,\n precision=None,\n grid_options=None,\n column_options=None,\n@@ -56,6 +57,9 @@\n Whether to show a toolbar with options for adding/removing rows.\n Adding/removing rows is an experimental feature which only works\n with DataFrames that have an integer index.\n+ show_history : bool\n+ Whether to show the cell containing the spreadsheet transformation\n+ history.\n column_options : dict\n Column options that are to be applied to every column. See the\n Notes section below for more information on the available options,\n@@ -171,6 +175,7 @@\n return show_grid(\n dataframe,\n show_toolbar,\n+ show_history,\n precision,\n grid_options,\n column_options,\n", "issue": "Add `show_history` option to Spreadsheet API\nChanges were made to the `modin_spreadsheet` API `show_grid`. Make corresponding changes on Spreadsheet API `from_dataframe`.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom .. import pandas as pd\nfrom modin_spreadsheet import show_grid, SpreadsheetWidget\n\n\ndef from_dataframe(\n dataframe,\n show_toolbar=None,\n precision=None,\n grid_options=None,\n column_options=None,\n column_definitions=None,\n row_edit_callback=None,\n):\n \"\"\"\n Renders a DataFrame or Series as an interactive spreadsheet, represented by\n an instance of the ``SpreadsheetWidget`` class. The ``SpreadsheetWidget`` instance\n is constructed using the options passed in to this function. The\n ``dataframe`` argument to this function is used as the ``df`` kwarg in\n call to the SpreadsheetWidget constructor, and the rest of the parameters\n are passed through as is.\n\n If the ``dataframe`` argument is a Series, it will be converted to a\n DataFrame before being passed in to the SpreadsheetWidget constructor as the\n ``df`` kwarg.\n\n :rtype: SpreadsheetWidget\n\n Parameters\n ----------\n dataframe : DataFrame\n The DataFrame that will be displayed by this instance of\n SpreadsheetWidget.\n grid_options : dict\n Options to use when creating the SlickGrid control (i.e. the\n interactive grid). See the Notes section below for more information\n on the available options, as well as the default options that this\n widget uses.\n precision : integer\n The number of digits of precision to display for floating-point\n values. 
If unset, we use the value of\n `pandas.get_option('display.precision')`.\n show_toolbar : bool\n Whether to show a toolbar with options for adding/removing rows.\n Adding/removing rows is an experimental feature which only works\n with DataFrames that have an integer index.\n column_options : dict\n Column options that are to be applied to every column. See the\n Notes section below for more information on the available options,\n as well as the default options that this widget uses.\n column_definitions : dict\n Column options that are to be applied to individual\n columns. The keys of the dict should be the column names, and each\n value should be the column options for a particular column,\n represented as a dict. The available options for each column are the\n same options that are available to be set for all columns via the\n ``column_options`` parameter. See the Notes section below for more\n information on those options.\n row_edit_callback : callable\n A callable that is called to determine whether a particular row\n should be editable or not. Its signature should be\n ``callable(row)``, where ``row`` is a dictionary which contains a\n particular row's values, keyed by column name. The callback should\n return True if the provided row should be editable, and False\n otherwise.\n\n\n Notes\n -----\n The following dictionary is used for ``grid_options`` if none are\n provided explicitly::\n\n {\n # SlickGrid options\n 'fullWidthRows': True,\n 'syncColumnCellResize': True,\n 'forceFitColumns': False,\n 'defaultColumnWidth': 150,\n 'rowHeight': 28,\n 'enableColumnReorder': False,\n 'enableTextSelectionOnCells': True,\n 'editable': True,\n 'autoEdit': False,\n 'explicitInitialization': True,\n\n # Modin-spreadsheet options\n 'maxVisibleRows': 15,\n 'minVisibleRows': 8,\n 'sortable': True,\n 'filterable': True,\n 'highlightSelectedCell': False,\n 'highlightSelectedRow': True\n }\n\n The first group of options are SlickGrid \"grid options\" which are\n described in the `SlickGrid documentation\n <https://github.com/mleibman/SlickGrid/wiki/Grid-Options>`_.\n\n The second group of option are options that were added specifically\n for modin-spreadsheet and therefore are not documented in the SlickGrid documentation.\n The following bullet points describe these options.\n\n * **maxVisibleRows** The maximum number of rows that modin-spreadsheet will show.\n * **minVisibleRows** The minimum number of rows that modin-spreadsheet will show\n * **sortable** Whether the modin-spreadsheet instance will allow the user to sort\n columns by clicking the column headers. When this is set to ``False``,\n nothing will happen when users click the column headers.\n * **filterable** Whether the modin-spreadsheet instance will allow the user to filter\n the grid. 
When this is set to ``False`` the filter icons won't be shown\n for any columns.\n * **highlightSelectedCell** If you set this to True, the selected cell\n will be given a light blue border.\n * **highlightSelectedRow** If you set this to False, the light blue\n background that's shown by default for selected rows will be hidden.\n\n The following dictionary is used for ``column_options`` if none are\n provided explicitly::\n\n {\n # SlickGrid column options\n 'defaultSortAsc': True,\n 'maxWidth': None,\n 'minWidth': 30,\n 'resizable': True,\n 'sortable': True,\n 'toolTip': \"\",\n 'width': None\n\n # Modin-spreadsheet column options\n 'editable': True,\n }\n\n The first group of options are SlickGrid \"column options\" which are\n described in the `SlickGrid documentation\n <https://github.com/mleibman/SlickGrid/wiki/Column-Options>`_.\n\n The ``editable`` option was added specifically for modin-spreadsheet and therefore is\n not documented in the SlickGrid documentation. This option specifies\n whether a column should be editable or not.\n\n See Also\n --------\n set_defaults : Permanently set global defaults for the parameters\n of ``show_grid``, with the exception of the ``dataframe``\n and ``column_definitions`` parameters, since those\n depend on the particular set of data being shown by an\n instance, and therefore aren't parameters we would want\n to set for all SpreadsheetWidget instances.\n set_grid_option : Permanently set global defaults for individual\n grid options. Does so by changing the defaults\n that the ``show_grid`` method uses for the\n ``grid_options`` parameter.\n SpreadsheetWidget : The widget class that is instantiated and returned by this\n method.\n\n \"\"\"\n if not isinstance(dataframe, pd.DataFrame):\n raise TypeError(\"dataframe must be modin.DataFrame, not %s\" % type(dataframe))\n return show_grid(\n dataframe,\n show_toolbar,\n precision,\n grid_options,\n column_options,\n column_definitions,\n row_edit_callback,\n )\n\n\ndef to_dataframe(spreadsheet):\n \"\"\"\n Get a copy of the DataFrame that reflects the current state of the ``spreadsheet`` SpreadsheetWidget instance UI.\n This includes any sorting or filtering changes, as well as edits\n that have been made by double clicking cells.\n\n :rtype: DataFrame\n\n Parameters\n ----------\n spreadsheet : SpreadsheetWidget\n The SpreadsheetWidget instance that DataFrame that will be displayed by this instance of\n SpreadsheetWidget.\n \"\"\"\n if not isinstance(spreadsheet, SpreadsheetWidget):\n raise TypeError(\n \"spreadsheet must be modin_spreadsheet.SpreadsheetWidget, not %s\"\n % type(spreadsheet)\n )\n return spreadsheet.get_changed_df()\n", "path": "modin/spreadsheet/general.py"}]} | 2,854 | 231 |
gh_patches_debug_17425 | rasdani/github-patches | git_diff | python-discord__site-716 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ValueError: Unknown format code 'X' for object of type 'str'
Sentry Issue: [SITE-25](https://sentry.io/organizations/python-discord/issues/3211854811/?referrer=github_integration)
```
ValueError: Unknown format code 'X' for object of type 'str'
(16 additional frame(s) were not displayed)
...
File "django/template/defaulttags.py", line 211, in render
nodelist.append(node.render_annotated(context))
File "django/template/base.py", line 905, in render_annotated
return self.render(context)
File "django/template/base.py", line 988, in render
output = self.filter_expression.resolve(context)
File "django/template/base.py", line 698, in resolve
new_obj = func(obj, *arg_vals)
File "pydis_site/apps/staff/templatetags/deletedmessage_filters.py", line 15, in hex_colour
colour = f"#{color:0>6X}"
```
</issue>
<code>
[start of pydis_site/apps/staff/templatetags/deletedmessage_filters.py]
1 from datetime import datetime
2
3 from django import template
4
5 register = template.Library()
6
7
8 @register.filter
9 def hex_colour(color: int) -> str:
10 """
11 Converts an integer representation of a colour to the RGB hex value.
12
13 As we are using a Discord dark theme analogue, black colours are returned as white instead.
14 """
15 colour = f"#{color:0>6X}"
16 return colour if colour != "#000000" else "#FFFFFF"
17
18
19 @register.filter
20 def footer_datetime(timestamp: str) -> datetime:
21 """Takes an embed timestamp and returns a timezone-aware datetime object."""
22 return datetime.fromisoformat(timestamp)
23
24
25 @register.filter
26 def visible_newlines(text: str) -> str:
27 """Takes an embed timestamp and returns a timezone-aware datetime object."""
28 return text.replace("\n", " <span class='has-text-grey'>↵</span><br>")
29
[end of pydis_site/apps/staff/templatetags/deletedmessage_filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py
--- a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py
+++ b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py
@@ -1,4 +1,5 @@
from datetime import datetime
+from typing import Union
from django import template
@@ -6,13 +7,16 @@
@register.filter
-def hex_colour(color: int) -> str:
+def hex_colour(colour: Union[str, int]) -> str:
"""
- Converts an integer representation of a colour to the RGB hex value.
+ Converts the given representation of a colour to its RGB hex string.
As we are using a Discord dark theme analogue, black colours are returned as white instead.
"""
- colour = f"#{color:0>6X}"
+ if isinstance(colour, str):
+ colour = colour if colour.startswith("#") else f"#{colour}"
+ else:
+ colour = f"#{colour:0>6X}"
return colour if colour != "#000000" else "#FFFFFF"
| {"golden_diff": "diff --git a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n--- a/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n+++ b/pydis_site/apps/staff/templatetags/deletedmessage_filters.py\n@@ -1,4 +1,5 @@\n from datetime import datetime\n+from typing import Union\n \n from django import template\n \n@@ -6,13 +7,16 @@\n \n \n @register.filter\n-def hex_colour(color: int) -> str:\n+def hex_colour(colour: Union[str, int]) -> str:\n \"\"\"\n- Converts an integer representation of a colour to the RGB hex value.\n+ Converts the given representation of a colour to its RGB hex string.\n \n As we are using a Discord dark theme analogue, black colours are returned as white instead.\n \"\"\"\n- colour = f\"#{color:0>6X}\"\n+ if isinstance(colour, str):\n+ colour = colour if colour.startswith(\"#\") else f\"#{colour}\"\n+ else:\n+ colour = f\"#{colour:0>6X}\"\n return colour if colour != \"#000000\" else \"#FFFFFF\"\n", "issue": "ValueError: Unknown format code 'X' for object of type 'str'\nSentry Issue: [SITE-25](https://sentry.io/organizations/python-discord/issues/3211854811/?referrer=github_integration)\n\n```\nValueError: Unknown format code 'X' for object of type 'str'\n(16 additional frame(s) were not displayed)\n...\n File \"django/template/defaulttags.py\", line 211, in render\n nodelist.append(node.render_annotated(context))\n File \"django/template/base.py\", line 905, in render_annotated\n return self.render(context)\n File \"django/template/base.py\", line 988, in render\n output = self.filter_expression.resolve(context)\n File \"django/template/base.py\", line 698, in resolve\n new_obj = func(obj, *arg_vals)\n File \"pydis_site/apps/staff/templatetags/deletedmessage_filters.py\", line 15, in hex_colour\n colour = f\"#{color:0>6X}\"\n```\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]\ndef hex_colour(color: int) -> str:\n \"\"\"\n Converts an integer representation of a colour to the RGB hex value.\n\n As we are using a Discord dark theme analogue, black colours are returned as white instead.\n \"\"\"\n colour = f\"#{color:0>6X}\"\n return colour if colour != \"#000000\" else \"#FFFFFF\"\n\n\[email protected]\ndef footer_datetime(timestamp: str) -> datetime:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return datetime.fromisoformat(timestamp)\n\n\[email protected]\ndef visible_newlines(text: str) -> str:\n \"\"\"Takes an embed timestamp and returns a timezone-aware datetime object.\"\"\"\n return text.replace(\"\\n\", \" <span class='has-text-grey'>\u21b5</span><br>\")\n", "path": "pydis_site/apps/staff/templatetags/deletedmessage_filters.py"}]} | 1,038 | 273 |
gh_patches_debug_33783 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-6144 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Some hooks should get fixed
<!--
Welcome to the PyInstaller issue tracker! Before creating an issue, please heed the following:
1. This tracker should only be used to report bugs and request features / enhancements to PyInstaller
- For questions and general support, use the mailing list, see
<http://www.pyinstaller.org/support.html#mailing-list>
2. Use the search function before creating a new issue. Duplicates will be closed and directed to
the original discussion.
3. When making a bug report, make sure you provide all required information. The easier it is for
maintainers to reproduce, the faster it'll be fixed.
-->
<!-- +++ ONLY TEXT +++ DO NOT POST IMAGES +++ -->
## Description of the issue
It returns `ModuleNotFoundError: No module named '_sysconfigdata__win32_'` at `from sklearn.cluster import MiniBatchKMeans` for some reason
### Context information (for bug reports)
* Output of `pyinstaller --version`: ```4.5.1```
* Version of Python: ```Python 3.9.6```
* Platform: ```MSYS2 MINGW64```
* How you installed Python: ```from MSYS2, the MINGW64 one```
* Did you also try this on another platform? Does it work there? ```Nope```
* follow *all* the instructions in our "If Things Go Wrong" Guide
(https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and
### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)
* [x] start with clean installation
* [ ] use the latest development version
* [x] Run your frozen program **from a command window (shell)** — instead of double-clicking on it ```But it didn't catch the program, like it runs it in separate process. I use windowed mode, anyways```
* [ ] Package your program in **--onedir mode**
* [x] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file
* [x] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.
### A minimal example program which shows the error
#### window.py
```
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gio
import os
import gc
import numpy as np
import pandas as pd
from sklearn.cluster import MiniBatchKMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
@Gtk.Template(resource_path='/org/example/App/window.ui')
class ClusterifyWindow(Gtk.ApplicationWindow):
__gtype_name__ = 'ClusterifyWindow'
full_initialized = False
...
def __init__(self, log_error, **kwargs):
super().__init__(**kwargs)
self.log_error = log_error
@Gtk.Template.Callback()
def on_ClusterifyWindow_focus_in_event(self, cb_widget, data):
if not self.full_initialized:
import matplotlib
matplotlib.use("GTK3Cairo")
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
self.fi_clusters = plt.figure()
self.cv_clusters = FigureCanvas(self.fi_clusters)
self.cv_clusters.show_all()
...
self.full_initialized = True
```
#### clusterify.spec
```
# -*- mode: python ; coding: utf-8 -*-
typelib_path = "C:/msys64/mingw64/lib/girepository-1.0"
block_cipher = None
a = Analysis(['C:/msys64/mingw64/bin/clusterify'],
pathex=['C:/msys64/home/rivan/Clusterify'],
binaries=[(os.path.join(typelib_path, tl), 'gi_typelibs') for tl in os.listdir(typelib_path)],
datas=[],
hiddenimports=['numpy',
'pandas',
'sklearn.cluster', 'sklearn.preprocessing', 'sklearn.decomposition',
'matplotlib', 'matplotlib.backends.backend_gtk3cairo',
'seaborn'],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='clusterify',
debug=True,
bootloader_ignore_signals=False,
strip=False,
upx=False,
upx_exclude=[],
runtime_tmpdir=None,
console=False,
disable_windowed_traceback=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None )
```
### Stacktrace / full error message
```
Traceback (most recent call last):
File "clusterify", line 51, in <module>
File "C:\msys64/mingw64/share/clusterify\clusterify\main.py", line 33, in <module>
from .window import ClusterifyWindow
File "C:\msys64/mingw64/share/clusterify\clusterify\window.py", line 36, in <module>
from sklearn.cluster import MiniBatchKMeans
File "PyInstaller/loader/pyimod03_importers.py", line 546, in exec_module
File "sklearn/__init__.py", line 82, in <module>
File "PyInstaller/loader/pyimod03_importers.py", line 546, in exec_module
File "sklearn/base.py", line 20, in <module>
File "PyInstaller/loader/pyimod03_importers.py", line 546, in exec_module
File "sklearn/utils/__init__.py", line 24, in <module>
File "PyInstaller/loader/pyimod03_importers.py", line 546, in exec_module
File "sklearn/utils/_joblib.py", line 7, in <module>
File "PyInstaller/loader/pyimod03_importers.py", line 546, in exec_module
File "joblib/__init__.py", line 113, in <module>
File "PyInstaller/loader/pyimod03_importers.py", line 546, in exec_module
File "joblib/memory.py", line 16, in <module>
File "PyInstaller/loader/pyimod03_importers.py", line 546, in exec_module
File "pydoc.py", line 458, in <module>
File "pydoc.py", line 488, in Doc
File "sysconfig.py", line 554, in get_path
File "sysconfig.py", line 544, in get_paths
File "sysconfig.py", line 179, in _expand_vars
File "sysconfig.py", line 594, in get_config_vars
File "sysconfig.py", line 460, in _init_posix
ModuleNotFoundError: No module named '_sysconfigdata__win32_'
```
</issue>
<code>
[start of PyInstaller/hooks/hook-distutils.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2021, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11 """
12 `distutils`-specific post-import hook.
13
14 This hook freezes the external `Makefile` and `pyconfig.h` files bundled with the active Python interpreter, which the
15 `distutils.sysconfig` module parses at runtime for platform-specific metadata.
16 """
17
18 # From Python 3.6 and later ``distutils.sysconfig`` takes on the same behaviour as regular ``sysconfig`` of moving the
19 # config vars to a module (see hook-sysconfig.py). It doesn't use a nice `get module name` function like ``sysconfig``
20 # does to help us locate it but the module is the same file that ``sysconfig`` uses so we can use the
21 # ``_get_sysconfigdata_name()`` from regular ``sysconfig``.
22 import sysconfig
23
24 from PyInstaller import compat
25
26 if not compat.is_win and hasattr(sysconfig, '_get_sysconfigdata_name'):
27 hiddenimports = [sysconfig._get_sysconfigdata_name()]
28
[end of PyInstaller/hooks/hook-distutils.py]
[start of PyInstaller/hooks/hook-sysconfig.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2021, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 import sysconfig
13
14 from PyInstaller.compat import is_win
15
16 if not is_win and hasattr(sysconfig, '_get_sysconfigdata_name'):
17 # Python 3.6 uses additional modules like `_sysconfigdata_m_linux_x86_64-linux-gnu`, see
18 # https://github.com/python/cpython/blob/3.6/Lib/sysconfig.py#L417
19 # Note: Some versions of Anaconda backport this feature to before 3.6. See issue #3105.
20 hiddenimports = [sysconfig._get_sysconfigdata_name()]
21
[end of PyInstaller/hooks/hook-sysconfig.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-distutils.py b/PyInstaller/hooks/hook-distutils.py
--- a/PyInstaller/hooks/hook-distutils.py
+++ b/PyInstaller/hooks/hook-distutils.py
@@ -19,9 +19,10 @@
# config vars to a module (see hook-sysconfig.py). It doesn't use a nice `get module name` function like ``sysconfig``
# does to help us locate it but the module is the same file that ``sysconfig`` uses so we can use the
# ``_get_sysconfigdata_name()`` from regular ``sysconfig``.
-import sysconfig
-
-from PyInstaller import compat
-
-if not compat.is_win and hasattr(sysconfig, '_get_sysconfigdata_name'):
+try:
+ import sysconfig
hiddenimports = [sysconfig._get_sysconfigdata_name()]
+except AttributeError:
+ # Either sysconfig has no attribute _get_sysconfigdata_name (i.e., the function does not exist), or this is Windows
+ # and the _get_sysconfigdata_name() call failed due to missing sys.abiflags attribute.
+ pass
diff --git a/PyInstaller/hooks/hook-sysconfig.py b/PyInstaller/hooks/hook-sysconfig.py
--- a/PyInstaller/hooks/hook-sysconfig.py
+++ b/PyInstaller/hooks/hook-sysconfig.py
@@ -9,12 +9,15 @@
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
-import sysconfig
-
-from PyInstaller.compat import is_win
-
-if not is_win and hasattr(sysconfig, '_get_sysconfigdata_name'):
- # Python 3.6 uses additional modules like `_sysconfigdata_m_linux_x86_64-linux-gnu`, see
- # https://github.com/python/cpython/blob/3.6/Lib/sysconfig.py#L417
- # Note: Some versions of Anaconda backport this feature to before 3.6. See issue #3105.
+# Python 3.6 uses additional modules like `_sysconfigdata_m_linux_x86_64-linux-gnu`, see
+# https://github.com/python/cpython/blob/3.6/Lib/sysconfig.py#L417
+# Note: Some versions of Anaconda backport this feature to before 3.6. See issue #3105.
+# Note: on Windows, python.org and Anaconda python provide _get_sysconfigdata_name, but calling it fails due to sys
+# module lacking abiflags attribute. It does work on MSYS2/MINGW python, where we need to collect corresponding file.
+try:
+ import sysconfig
hiddenimports = [sysconfig._get_sysconfigdata_name()]
+except AttributeError:
+ # Either sysconfig has no attribute _get_sysconfigdata_name (i.e., the function does not exist), or this is Windows
+ # and the _get_sysconfigdata_name() call failed due to missing sys.abiflags attribute.
+ pass
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-distutils.py b/PyInstaller/hooks/hook-distutils.py\n--- a/PyInstaller/hooks/hook-distutils.py\n+++ b/PyInstaller/hooks/hook-distutils.py\n@@ -19,9 +19,10 @@\n # config vars to a module (see hook-sysconfig.py). It doesn't use a nice `get module name` function like ``sysconfig``\n # does to help us locate it but the module is the same file that ``sysconfig`` uses so we can use the\n # ``_get_sysconfigdata_name()`` from regular ``sysconfig``.\n-import sysconfig\n-\n-from PyInstaller import compat\n-\n-if not compat.is_win and hasattr(sysconfig, '_get_sysconfigdata_name'):\n+try:\n+ import sysconfig\n hiddenimports = [sysconfig._get_sysconfigdata_name()]\n+except AttributeError:\n+ # Either sysconfig has no attribute _get_sysconfigdata_name (i.e., the function does not exist), or this is Windows\n+ # and the _get_sysconfigdata_name() call failed due to missing sys.abiflags attribute.\n+ pass\ndiff --git a/PyInstaller/hooks/hook-sysconfig.py b/PyInstaller/hooks/hook-sysconfig.py\n--- a/PyInstaller/hooks/hook-sysconfig.py\n+++ b/PyInstaller/hooks/hook-sysconfig.py\n@@ -9,12 +9,15 @@\n # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n #-----------------------------------------------------------------------------\n \n-import sysconfig\n-\n-from PyInstaller.compat import is_win\n-\n-if not is_win and hasattr(sysconfig, '_get_sysconfigdata_name'):\n- # Python 3.6 uses additional modules like `_sysconfigdata_m_linux_x86_64-linux-gnu`, see\n- # https://github.com/python/cpython/blob/3.6/Lib/sysconfig.py#L417\n- # Note: Some versions of Anaconda backport this feature to before 3.6. See issue #3105.\n+# Python 3.6 uses additional modules like `_sysconfigdata_m_linux_x86_64-linux-gnu`, see\n+# https://github.com/python/cpython/blob/3.6/Lib/sysconfig.py#L417\n+# Note: Some versions of Anaconda backport this feature to before 3.6. See issue #3105.\n+# Note: on Windows, python.org and Anaconda python provide _get_sysconfigdata_name, but calling it fails due to sys\n+# module lacking abiflags attribute. It does work on MSYS2/MINGW python, where we need to collect corresponding file.\n+try:\n+ import sysconfig\n hiddenimports = [sysconfig._get_sysconfigdata_name()]\n+except AttributeError:\n+ # Either sysconfig has no attribute _get_sysconfigdata_name (i.e., the function does not exist), or this is Windows\n+ # and the _get_sysconfigdata_name() call failed due to missing sys.abiflags attribute.\n+ pass\n", "issue": "Some hooks should get fixed\n<!--\r\nWelcome to the PyInstaller issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to PyInstaller\r\n - For questions and general support, use the mailing list, see\r\n\t <http://www.pyinstaller.org/support.html#mailing-list>\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a bug report, make sure you provide all required information. 
The easier it is for\r\n maintainers to reproduce, the faster it'll be fixed.\r\n-->\r\n\r\n<!-- +++ ONLY TEXT +++ DO NOT POST IMAGES +++ -->\r\n\r\n## Description of the issue\r\nIt returns `ModuleNotFoundError: No module named '_sysconfigdata__win32_'` at `from sklearn.cluster import MiniBatchKMeans` for some reason\r\n\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```4.5.1```\r\n* Version of Python: ```Python 3.9.6```\r\n* Platform: ```MSYS2 MINGW64```\r\n* How you installed Python: ```from MSYS2, the MINGW64 one```\r\n* Did you also try this on another platform? Does it work there? ```Nope```\r\n\r\n* follow *all* the instructions in our \"If Things Go Wrong\" Guide\r\n (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and\r\n\r\n### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)\r\n\r\n * [x] start with clean installation\r\n * [ ] use the latest development version\r\n * [x] Run your frozen program **from a command window (shell)** \u2014 instead of double-clicking on it ```But it didn't catch the program, like it runs it in separate process. I use windowed mode, anyways```\r\n * [ ] Package your program in **--onedir mode**\r\n * [x] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file\r\n * [x] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.\r\n\r\n\r\n### A minimal example program which shows the error\r\n#### window.py\r\n```\r\nimport gi\r\ngi.require_version('Gtk', '3.0')\r\nfrom gi.repository import Gtk, Gio\r\nimport os\r\nimport gc\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.cluster import MiniBatchKMeans\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\n\r\[email protected](resource_path='/org/example/App/window.ui')\r\nclass ClusterifyWindow(Gtk.ApplicationWindow):\r\n __gtype_name__ = 'ClusterifyWindow'\r\n full_initialized = False\r\n\r\n ...\r\n\r\n def __init__(self, log_error, **kwargs):\r\n super().__init__(**kwargs)\r\n self.log_error = log_error\r\n\r\n @Gtk.Template.Callback()\r\n def on_ClusterifyWindow_focus_in_event(self, cb_widget, data):\r\n if not self.full_initialized:\r\n import matplotlib\r\n matplotlib.use(\"GTK3Cairo\")\r\n import matplotlib.pyplot as plt\r\n import seaborn as sns\r\n sns.set()\r\n from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas\r\n\r\n self.fi_clusters = plt.figure()\r\n self.cv_clusters = FigureCanvas(self.fi_clusters)\r\n self.cv_clusters.show_all()\r\n\r\n ...\r\n\r\n self.full_initialized = True\r\n```\r\n\r\n#### clusterify.spec\r\n```\r\n# -*- mode: python ; coding: utf-8 -*-\r\n\r\ntypelib_path = \"C:/msys64/mingw64/lib/girepository-1.0\"\r\nblock_cipher = None\r\n\r\n\r\na = Analysis(['C:/msys64/mingw64/bin/clusterify'],\r\n pathex=['C:/msys64/home/rivan/Clusterify'],\r\n binaries=[(os.path.join(typelib_path, tl), 'gi_typelibs') for tl in os.listdir(typelib_path)],\r\n datas=[],\r\n hiddenimports=['numpy',\r\n 'pandas',\r\n 'sklearn.cluster', 'sklearn.preprocessing', 'sklearn.decomposition',\r\n 'matplotlib', 'matplotlib.backends.backend_gtk3cairo',\r\n 'seaborn'],\r\n hookspath=[],\r\n hooksconfig={},\r\n runtime_hooks=[],\r\n excludes=[],\r\n win_no_prefer_redirects=False,\r\n 
win_private_assemblies=False,\r\n cipher=block_cipher,\r\n noarchive=False)\r\npyz = PYZ(a.pure, a.zipped_data,\r\n cipher=block_cipher)\r\n\r\nexe = EXE(pyz,\r\n a.scripts,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas, \r\n [],\r\n name='clusterify',\r\n debug=True,\r\n bootloader_ignore_signals=False,\r\n strip=False,\r\n upx=False,\r\n upx_exclude=[],\r\n runtime_tmpdir=None,\r\n console=False,\r\n disable_windowed_traceback=False,\r\n target_arch=None,\r\n codesign_identity=None,\r\n entitlements_file=None )\r\n```\r\n\r\n### Stacktrace / full error message\r\n```\r\nTraceback (most recent call last):\r\n File \"clusterify\", line 51, in <module>\r\n File \"C:\\msys64/mingw64/share/clusterify\\clusterify\\main.py\", line 33, in <module>\r\n from .window import ClusterifyWindow\r\n File \"C:\\msys64/mingw64/share/clusterify\\clusterify\\window.py\", line 36, in <module>\r\n from sklearn.cluster import MiniBatchKMeans\r\n File \"PyInstaller/loader/pyimod03_importers.py\", line 546, in exec_module\r\n File \"sklearn/__init__.py\", line 82, in <module>\r\n File \"PyInstaller/loader/pyimod03_importers.py\", line 546, in exec_module\r\n File \"sklearn/base.py\", line 20, in <module>\r\n File \"PyInstaller/loader/pyimod03_importers.py\", line 546, in exec_module\r\n File \"sklearn/utils/__init__.py\", line 24, in <module>\r\n File \"PyInstaller/loader/pyimod03_importers.py\", line 546, in exec_module\r\n File \"sklearn/utils/_joblib.py\", line 7, in <module>\r\n File \"PyInstaller/loader/pyimod03_importers.py\", line 546, in exec_module\r\n File \"joblib/__init__.py\", line 113, in <module>\r\n File \"PyInstaller/loader/pyimod03_importers.py\", line 546, in exec_module\r\n File \"joblib/memory.py\", line 16, in <module>\r\n File \"PyInstaller/loader/pyimod03_importers.py\", line 546, in exec_module\r\n File \"pydoc.py\", line 458, in <module>\r\n File \"pydoc.py\", line 488, in Doc\r\n File \"sysconfig.py\", line 554, in get_path\r\n File \"sysconfig.py\", line 544, in get_paths\r\n File \"sysconfig.py\", line 179, in _expand_vars\r\n File \"sysconfig.py\", line 594, in get_config_vars\r\n File \"sysconfig.py\", line 460, in _init_posix\r\nModuleNotFoundError: No module named '_sysconfigdata__win32_'\r\n```\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\n`distutils`-specific post-import hook.\n\nThis hook freezes the external `Makefile` and `pyconfig.h` files bundled with the active Python interpreter, which the\n`distutils.sysconfig` module parses at runtime for platform-specific metadata.\n\"\"\"\n\n# From Python 3.6 and later ``distutils.sysconfig`` takes on the same behaviour as regular ``sysconfig`` of moving the\n# config vars to a module (see hook-sysconfig.py). 
It doesn't use a nice `get module name` function like ``sysconfig``\n# does to help us locate it but the module is the same file that ``sysconfig`` uses so we can use the\n# ``_get_sysconfigdata_name()`` from regular ``sysconfig``.\nimport sysconfig\n\nfrom PyInstaller import compat\n\nif not compat.is_win and hasattr(sysconfig, '_get_sysconfigdata_name'):\n hiddenimports = [sysconfig._get_sysconfigdata_name()]\n", "path": "PyInstaller/hooks/hook-distutils.py"}, {"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport sysconfig\n\nfrom PyInstaller.compat import is_win\n\nif not is_win and hasattr(sysconfig, '_get_sysconfigdata_name'):\n # Python 3.6 uses additional modules like `_sysconfigdata_m_linux_x86_64-linux-gnu`, see\n # https://github.com/python/cpython/blob/3.6/Lib/sysconfig.py#L417\n # Note: Some versions of Anaconda backport this feature to before 3.6. See issue #3105.\n hiddenimports = [sysconfig._get_sysconfigdata_name()]\n", "path": "PyInstaller/hooks/hook-sysconfig.py"}]} | 2,844 | 668 |
gh_patches_debug_9896 | rasdani/github-patches | git_diff | pytorch__TensorRT-2515 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for `aten.select_scatter`
</issue>
<code>
[start of py/torch_tensorrt/dynamo/lowering/_decompositions.py]
1 import logging
2 from typing import Any, Callable, Dict, List, Optional
3
4 import torch
5 from torch._decomp import register_decomposition
6 from torch._ops import OpOverload
7 from torch_tensorrt.dynamo.conversion.converter_utils import get_positive_dim
8
9 from ._decomposition_groups import (
10 ENABLED_TORCH_DECOMPOSITIONS,
11 TORCH_TRT_DECOMPOSITIONS,
12 _core_aten_decompositions,
13 aten,
14 torch_disabled_decompositions,
15 torch_enabled_decompositions,
16 )
17
18 logger = logging.getLogger(__name__)
19
20
21 def register_torch_trt_decomposition(
22 aten_op: OpOverload, registry: Optional[Any] = None
23 ) -> Callable[[Any], Any]:
24 """Checks if the decomposition already exists in one of the sets
25 Registers the decomposition via the Torch utility
26
27 Alerts the user if the decomposition already exists, before registering
28 Throws an AssertionError if the user attempts to register a decomposition
29 which is present in the set of explicitly disabled decompositions
30 """
31 if aten_op in torch_enabled_decompositions:
32 logger.warning(
33 f"Detected custom decomposition for {aten_op}, which conflicts "
34 "with an existing Torch decomposition in torch_enabled_decompositions. "
35 "The custom implementation will take precedence."
36 )
37 elif aten_op in torch_disabled_decompositions:
38 logger.info(
39 f"Detected custom decomposition for {aten_op}, which is present "
40 "in torch_disabled_decompositions."
41 )
42
43 # Conflicts with _core_aten_decompositions will only occur if
44 # enable_experimental_decompositions is True in get_decompositions
45 if aten_op in _core_aten_decompositions:
46 logger.debug(
47 f"Detected custom decomposition for {aten_op}, which conflicts "
48 "with an existing Torch decomposition in core_aten_decompositions. "
49 "The custom implementation will take precedence."
50 )
51
52 def register(fn: Callable[[Any], Any]) -> Any:
53 return register_decomposition(aten_op=aten_op, registry=registry)(fn)
54
55 return register
56
57
58 def replace_inplace_op(aten_op: OpOverload, outplace_op: OpOverload) -> Any:
59 """Replace inplace operation with functional equivalent
60 Adapted from:
61 https://github.com/pytorch/pytorch/blob/3344d79e3f732dadd5c85b99a7aa1a022f187929/torch/_decomp/decompositions.py#L3355-L3361
62 """
63
64 @register_torch_trt_decomposition(aten_op, registry=TORCH_TRT_DECOMPOSITIONS)
65 def inplace_op(*args, **kwargs): # type: ignore
66 out = outplace_op(*args, **kwargs)
67 return args[0].copy_(out)
68
69 return inplace_op
70
71
72 replace_inplace_op(aten.add_, aten.add)
73 replace_inplace_op(aten.addbmm_, aten.addbmm)
74 replace_inplace_op(aten.addmm_, aten.addmm)
75 replace_inplace_op(aten.addmv_, aten.addmv)
76 replace_inplace_op(aten.baddbmm_, aten.baddbmm)
77 replace_inplace_op(aten.cumprod_, aten.cumprod)
78 replace_inplace_op(aten.index_put_, aten.index_put)
79 replace_inplace_op(aten.index_reduce_, aten.index_reduce)
80 replace_inplace_op(aten.relu_, aten.relu)
81 replace_inplace_op(aten.round_, aten.round)
82 replace_inplace_op(aten.scatter_, aten.scatter)
83 replace_inplace_op(aten.scatter_add_, aten.scatter_add)
84 replace_inplace_op(aten.scatter_reduce_, aten.scatter_reduce)
85
86
87 @register_torch_trt_decomposition(aten.rsqrt, registry=TORCH_TRT_DECOMPOSITIONS)
88 def rsqrt_replacement(*args, **kwargs) -> torch.Tensor: # type: ignore
89 return torch.reciprocal(torch.sqrt(*args, **kwargs))
90
91
92 @register_torch_trt_decomposition(aten._unsafe_view, registry=TORCH_TRT_DECOMPOSITIONS)
93 def unsafe_view_replacement(x: torch.Tensor, *args, **kwargs) -> torch.Tensor: # type: ignore
94 return torch.reshape(x, *args, **kwargs)
95
96
97 @register_torch_trt_decomposition(
98 torch.ops.aten.lift_fresh_copy, registry=TORCH_TRT_DECOMPOSITIONS
99 )
100 def lift_fresh_copy_replacement(x: torch.Tensor) -> torch.Tensor:
101 return x
102
103
104 @register_torch_trt_decomposition(aten.alias, registry=TORCH_TRT_DECOMPOSITIONS)
105 def alias_replacement(x: torch.Tensor) -> torch.Tensor:
106 return x
107
108
109 @register_torch_trt_decomposition(
110 torch.ops.aten.reciprocal.default, registry=TORCH_TRT_DECOMPOSITIONS
111 )
112 def reciprocal_replacement(
113 input_: torch.Tensor,
114 ) -> torch.Tensor:
115 return torch.div(1, input_)
116
117
118 @register_torch_trt_decomposition(
119 torch.ops.prims.var.default, registry=TORCH_TRT_DECOMPOSITIONS
120 )
121 def var_decomposition(
122 input_tensor: torch.Tensor,
123 dims: Optional[List[int]],
124 correction: int,
125 output_dtype: Optional[torch.dtype] = None,
126 ) -> torch.Tensor:
127 if dims is None:
128 dims = []
129
130 # If the dimensions are empty, variance is taken over all dimensions
131 if isinstance(dims, (tuple, list)) and len(dims) == 0:
132 N = input_tensor.numel()
133 # Otherwise, the number of samples is the product of the dimensions reduced over
134 else:
135 N = 1
136 for dim_i in dims:
137 N *= input_tensor.shape[dim_i]
138
139 # Compute the mean, difference, and correction term as per the formula:
140 # https://pytorch.org/docs/stable/generated/torch.var.html
141
142 # Additionally, prims does not support keepdim, and so we only keep dimensions
143 # on the first reduction, then remove it for the second
144 sample_mean = torch.mean(input_tensor, dims, keepdim=True)
145 diff = input_tensor - sample_mean
146 squared_diff = diff * diff
147 variance_unnormalized = torch.sum(squared_diff, dims, keepdim=False)
148
149 if correction is None:
150 correction_term = float(N - 1)
151 elif isinstance(correction, int):
152 correction_term = float(N - correction)
153 elif isinstance(correction, float):
154 correction_term = float(N) - correction
155 else:
156 raise RuntimeError("correction must be int or float")
157
158 if correction_term <= 0:
159 raise RuntimeError(f"correction term was non-positive, got: {correction_term}")
160
161 variance = variance_unnormalized / correction_term
162
163 return variance
164
165
166 @register_torch_trt_decomposition(
167 torch.ops.aten.empty_permuted.default, registry=TORCH_TRT_DECOMPOSITIONS
168 )
169 def empty_permuted_decomposition(*args, **kwargs) -> torch.Tensor:
170 empty_size = args[0]
171 empty_permute = args[1]
172 perm = [0] * len(empty_size)
173 for permute_index, permute_element in enumerate(empty_permute):
174 perm[permute_element] = permute_index
175 return torch.empty([empty_size[l] for l in empty_permute], **kwargs).permute(perm)
176
177
178 @register_torch_trt_decomposition(
179 torch.ops.aten.slice_scatter.default, registry=TORCH_TRT_DECOMPOSITIONS
180 )
181 def slice_scatter_decomposition(
182 input_tensor: torch.Tensor,
183 src_tensor: torch.Tensor,
184 dim: int,
185 start: Optional[int] = None,
186 end: Optional[int] = None,
187 step: Optional[int] = None,
188 ):
189 dim_size = input_tensor.shape[dim]
190 start = get_positive_dim(start, input_tensor.shape[dim])
191 if end is None:
192 end = dim_size
193 end = get_positive_dim(end, input_tensor.shape[dim])
194 if step is None:
195 step = 1
196
197 src_dim = src_tensor.shape
198 # step == 0 is not a valid torch case
199 # also src_dim should be equal to slice dimension
200
201 if start == 0 and end == dim_size and step == 1:
202 return src_tensor
203
204 cat_tensors = []
205 index_tensor_shape = []
206 for i, src_each_dim in enumerate(list(src_dim)):
207 if i != dim:
208 index_tensor_shape.append(src_each_dim)
209 for index in range(start, end, step):
210 cat_tensors.append(index * torch.ones(index_tensor_shape, dtype=torch.long))
211 index_tensor = torch.stack(cat_tensors, dim).cuda()
212 output_tensor = torch.scatter(input_tensor, dim, index_tensor, src_tensor)
213 return output_tensor
214
215
216 def get_decompositions(
217 enable_experimental_decompositions: bool = False,
218 ) -> Dict[OpOverload, Callable[[Any], Any]]:
219 if enable_experimental_decompositions:
220 CORE_ATEN_DECOMPOSITIONS_FILTERED: Dict[OpOverload, Callable[[Any], Any]] = {
221 decomp: _core_aten_decompositions[decomp]
222 for decomp in _core_aten_decompositions
223 if decomp not in torch_disabled_decompositions
224 }
225 return {**CORE_ATEN_DECOMPOSITIONS_FILTERED, **TORCH_TRT_DECOMPOSITIONS}
226 else:
227 return {**ENABLED_TORCH_DECOMPOSITIONS, **TORCH_TRT_DECOMPOSITIONS}
228
[end of py/torch_tensorrt/dynamo/lowering/_decompositions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py/torch_tensorrt/dynamo/lowering/_decompositions.py b/py/torch_tensorrt/dynamo/lowering/_decompositions.py
--- a/py/torch_tensorrt/dynamo/lowering/_decompositions.py
+++ b/py/torch_tensorrt/dynamo/lowering/_decompositions.py
@@ -213,6 +213,19 @@
return output_tensor
+@register_torch_trt_decomposition(
+ torch.ops.aten.select_scatter.default, registry=TORCH_TRT_DECOMPOSITIONS
+)
+def select_scatter_decomposition(
+ input_tensor: torch.Tensor,
+ src_tensor: torch.Tensor,
+ dim: int,
+ index: int,
+) -> torch.Tensor:
+ src_tensor = torch.unsqueeze(src_tensor, dim)
+ return torch.slice_scatter(input_tensor, src_tensor, dim, index, index + 1, 1)
+
+
def get_decompositions(
enable_experimental_decompositions: bool = False,
) -> Dict[OpOverload, Callable[[Any], Any]]:
| {"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/lowering/_decompositions.py b/py/torch_tensorrt/dynamo/lowering/_decompositions.py\n--- a/py/torch_tensorrt/dynamo/lowering/_decompositions.py\n+++ b/py/torch_tensorrt/dynamo/lowering/_decompositions.py\n@@ -213,6 +213,19 @@\n return output_tensor\n \n \n+@register_torch_trt_decomposition(\n+ torch.ops.aten.select_scatter.default, registry=TORCH_TRT_DECOMPOSITIONS\n+)\n+def select_scatter_decomposition(\n+ input_tensor: torch.Tensor,\n+ src_tensor: torch.Tensor,\n+ dim: int,\n+ index: int,\n+) -> torch.Tensor:\n+ src_tensor = torch.unsqueeze(src_tensor, dim)\n+ return torch.slice_scatter(input_tensor, src_tensor, dim, index, index + 1, 1)\n+\n+\n def get_decompositions(\n enable_experimental_decompositions: bool = False,\n ) -> Dict[OpOverload, Callable[[Any], Any]]:\n", "issue": "Add support for `aten.select_scatter`\n\n", "before_files": [{"content": "import logging\nfrom typing import Any, Callable, Dict, List, Optional\n\nimport torch\nfrom torch._decomp import register_decomposition\nfrom torch._ops import OpOverload\nfrom torch_tensorrt.dynamo.conversion.converter_utils import get_positive_dim\n\nfrom ._decomposition_groups import (\n ENABLED_TORCH_DECOMPOSITIONS,\n TORCH_TRT_DECOMPOSITIONS,\n _core_aten_decompositions,\n aten,\n torch_disabled_decompositions,\n torch_enabled_decompositions,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef register_torch_trt_decomposition(\n aten_op: OpOverload, registry: Optional[Any] = None\n) -> Callable[[Any], Any]:\n \"\"\"Checks if the decomposition already exists in one of the sets\n Registers the decomposition via the Torch utility\n\n Alerts the user if the decomposition already exists, before registering\n Throws an AssertionError if the user attempts to register a decomposition\n which is present in the set of explicitly disabled decompositions\n \"\"\"\n if aten_op in torch_enabled_decompositions:\n logger.warning(\n f\"Detected custom decomposition for {aten_op}, which conflicts \"\n \"with an existing Torch decomposition in torch_enabled_decompositions. \"\n \"The custom implementation will take precedence.\"\n )\n elif aten_op in torch_disabled_decompositions:\n logger.info(\n f\"Detected custom decomposition for {aten_op}, which is present \"\n \"in torch_disabled_decompositions.\"\n )\n\n # Conflicts with _core_aten_decompositions will only occur if\n # enable_experimental_decompositions is True in get_decompositions\n if aten_op in _core_aten_decompositions:\n logger.debug(\n f\"Detected custom decomposition for {aten_op}, which conflicts \"\n \"with an existing Torch decomposition in core_aten_decompositions. 
\"\n \"The custom implementation will take precedence.\"\n )\n\n def register(fn: Callable[[Any], Any]) -> Any:\n return register_decomposition(aten_op=aten_op, registry=registry)(fn)\n\n return register\n\n\ndef replace_inplace_op(aten_op: OpOverload, outplace_op: OpOverload) -> Any:\n \"\"\"Replace inplace operation with functional equivalent\n Adapted from:\n https://github.com/pytorch/pytorch/blob/3344d79e3f732dadd5c85b99a7aa1a022f187929/torch/_decomp/decompositions.py#L3355-L3361\n \"\"\"\n\n @register_torch_trt_decomposition(aten_op, registry=TORCH_TRT_DECOMPOSITIONS)\n def inplace_op(*args, **kwargs): # type: ignore\n out = outplace_op(*args, **kwargs)\n return args[0].copy_(out)\n\n return inplace_op\n\n\nreplace_inplace_op(aten.add_, aten.add)\nreplace_inplace_op(aten.addbmm_, aten.addbmm)\nreplace_inplace_op(aten.addmm_, aten.addmm)\nreplace_inplace_op(aten.addmv_, aten.addmv)\nreplace_inplace_op(aten.baddbmm_, aten.baddbmm)\nreplace_inplace_op(aten.cumprod_, aten.cumprod)\nreplace_inplace_op(aten.index_put_, aten.index_put)\nreplace_inplace_op(aten.index_reduce_, aten.index_reduce)\nreplace_inplace_op(aten.relu_, aten.relu)\nreplace_inplace_op(aten.round_, aten.round)\nreplace_inplace_op(aten.scatter_, aten.scatter)\nreplace_inplace_op(aten.scatter_add_, aten.scatter_add)\nreplace_inplace_op(aten.scatter_reduce_, aten.scatter_reduce)\n\n\n@register_torch_trt_decomposition(aten.rsqrt, registry=TORCH_TRT_DECOMPOSITIONS)\ndef rsqrt_replacement(*args, **kwargs) -> torch.Tensor: # type: ignore\n return torch.reciprocal(torch.sqrt(*args, **kwargs))\n\n\n@register_torch_trt_decomposition(aten._unsafe_view, registry=TORCH_TRT_DECOMPOSITIONS)\ndef unsafe_view_replacement(x: torch.Tensor, *args, **kwargs) -> torch.Tensor: # type: ignore\n return torch.reshape(x, *args, **kwargs)\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.lift_fresh_copy, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef lift_fresh_copy_replacement(x: torch.Tensor) -> torch.Tensor:\n return x\n\n\n@register_torch_trt_decomposition(aten.alias, registry=TORCH_TRT_DECOMPOSITIONS)\ndef alias_replacement(x: torch.Tensor) -> torch.Tensor:\n return x\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.reciprocal.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef reciprocal_replacement(\n input_: torch.Tensor,\n) -> torch.Tensor:\n return torch.div(1, input_)\n\n\n@register_torch_trt_decomposition(\n torch.ops.prims.var.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef var_decomposition(\n input_tensor: torch.Tensor,\n dims: Optional[List[int]],\n correction: int,\n output_dtype: Optional[torch.dtype] = None,\n) -> torch.Tensor:\n if dims is None:\n dims = []\n\n # If the dimensions are empty, variance is taken over all dimensions\n if isinstance(dims, (tuple, list)) and len(dims) == 0:\n N = input_tensor.numel()\n # Otherwise, the number of samples is the product of the dimensions reduced over\n else:\n N = 1\n for dim_i in dims:\n N *= input_tensor.shape[dim_i]\n\n # Compute the mean, difference, and correction term as per the formula:\n # https://pytorch.org/docs/stable/generated/torch.var.html\n\n # Additionally, prims does not support keepdim, and so we only keep dimensions\n # on the first reduction, then remove it for the second\n sample_mean = torch.mean(input_tensor, dims, keepdim=True)\n diff = input_tensor - sample_mean\n squared_diff = diff * diff\n variance_unnormalized = torch.sum(squared_diff, dims, keepdim=False)\n\n if correction is None:\n correction_term = float(N - 1)\n elif isinstance(correction, 
int):\n correction_term = float(N - correction)\n elif isinstance(correction, float):\n correction_term = float(N) - correction\n else:\n raise RuntimeError(\"correction must be int or float\")\n\n if correction_term <= 0:\n raise RuntimeError(f\"correction term was non-positive, got: {correction_term}\")\n\n variance = variance_unnormalized / correction_term\n\n return variance\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.empty_permuted.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef empty_permuted_decomposition(*args, **kwargs) -> torch.Tensor:\n empty_size = args[0]\n empty_permute = args[1]\n perm = [0] * len(empty_size)\n for permute_index, permute_element in enumerate(empty_permute):\n perm[permute_element] = permute_index\n return torch.empty([empty_size[l] for l in empty_permute], **kwargs).permute(perm)\n\n\n@register_torch_trt_decomposition(\n torch.ops.aten.slice_scatter.default, registry=TORCH_TRT_DECOMPOSITIONS\n)\ndef slice_scatter_decomposition(\n input_tensor: torch.Tensor,\n src_tensor: torch.Tensor,\n dim: int,\n start: Optional[int] = None,\n end: Optional[int] = None,\n step: Optional[int] = None,\n):\n dim_size = input_tensor.shape[dim]\n start = get_positive_dim(start, input_tensor.shape[dim])\n if end is None:\n end = dim_size\n end = get_positive_dim(end, input_tensor.shape[dim])\n if step is None:\n step = 1\n\n src_dim = src_tensor.shape\n # step == 0 is not a valid torch case\n # also src_dim should be equal to slice dimension\n\n if start == 0 and end == dim_size and step == 1:\n return src_tensor\n\n cat_tensors = []\n index_tensor_shape = []\n for i, src_each_dim in enumerate(list(src_dim)):\n if i != dim:\n index_tensor_shape.append(src_each_dim)\n for index in range(start, end, step):\n cat_tensors.append(index * torch.ones(index_tensor_shape, dtype=torch.long))\n index_tensor = torch.stack(cat_tensors, dim).cuda()\n output_tensor = torch.scatter(input_tensor, dim, index_tensor, src_tensor)\n return output_tensor\n\n\ndef get_decompositions(\n enable_experimental_decompositions: bool = False,\n) -> Dict[OpOverload, Callable[[Any], Any]]:\n if enable_experimental_decompositions:\n CORE_ATEN_DECOMPOSITIONS_FILTERED: Dict[OpOverload, Callable[[Any], Any]] = {\n decomp: _core_aten_decompositions[decomp]\n for decomp in _core_aten_decompositions\n if decomp not in torch_disabled_decompositions\n }\n return {**CORE_ATEN_DECOMPOSITIONS_FILTERED, **TORCH_TRT_DECOMPOSITIONS}\n else:\n return {**ENABLED_TORCH_DECOMPOSITIONS, **TORCH_TRT_DECOMPOSITIONS}\n", "path": "py/torch_tensorrt/dynamo/lowering/_decompositions.py"}]} | 3,221 | 235 |
gh_patches_debug_24987 | rasdani/github-patches | git_diff | ray-project__ray-9300 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tune][rllib] Windows: GPU not recognized
### What is the problem?
I'm getting `ray.tune.error.TuneError: Insufficient cluster resources to launch trial`.
I specified a GPU in my config but ray does not recognize my GPU (RTX 2080) and throws an error.
I can get past this by setting `num_gpus: 0` in my config for now.
https://gist.github.com/juliusfrost/fa7ebbb8d1dfc66eea0bbc4babcbe5aa
### Reproduction (REQUIRED)
```
git clone https://github.com/juliusfrost/rllib-tune-atari.git
cd rllib-tune-atari
pip install -r requirements.txt
python train.py --algo a2c
```
- [x] I have verified my script runs in a clean environment and reproduces the issue.
- [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).
</issue>
<code>
[start of python/ray/resource_spec.py]
1 import math
2 from collections import namedtuple
3 import logging
4 import multiprocessing
5 import os
6
7 import ray
8 import ray.ray_constants as ray_constants
9
10 logger = logging.getLogger(__name__)
11
12 # Prefix for the node id resource that is automatically added to each node.
13 # For example, a node may have id `node:172.23.42.1`.
14 NODE_ID_PREFIX = "node:"
15
16
17 class ResourceSpec(
18 namedtuple("ResourceSpec", [
19 "num_cpus", "num_gpus", "memory", "object_store_memory",
20 "resources", "redis_max_memory"
21 ])):
22 """Represents the resource configuration passed to a raylet.
23
24 All fields can be None. Before starting services, resolve() should be
25 called to return a ResourceSpec with unknown values filled in with
26 defaults based on the local machine specifications.
27
28 Attributes:
29 num_cpus: The CPUs allocated for this raylet.
30 num_gpus: The GPUs allocated for this raylet.
31 memory: The memory allocated for this raylet.
32 object_store_memory: The object store memory allocated for this raylet.
33 Note that when calling to_resource_dict(), this will be scaled down
34 by 30% to account for the global plasma LRU reserve.
35 resources: The custom resources allocated for this raylet.
36 redis_max_memory: The max amount of memory (in bytes) to allow each
37 redis shard to use. Once the limit is exceeded, redis will start
38 LRU eviction of entries. This only applies to the sharded redis
39 tables (task, object, and profile tables). By default, this is
40 capped at 10GB but can be set higher.
41 """
42
43 def __new__(cls,
44 num_cpus=None,
45 num_gpus=None,
46 memory=None,
47 object_store_memory=None,
48 resources=None,
49 redis_max_memory=None):
50 return super(ResourceSpec, cls).__new__(cls, num_cpus, num_gpus,
51 memory, object_store_memory,
52 resources, redis_max_memory)
53
54 def resolved(self):
55 """Returns if this ResourceSpec has default values filled out."""
56 for v in self._asdict().values():
57 if v is None:
58 return False
59 return True
60
61 def to_resource_dict(self):
62 """Returns a dict suitable to pass to raylet initialization.
63
64 This renames num_cpus / num_gpus to "CPU" / "GPU", translates memory
65 from bytes into 100MB memory units, and checks types.
66 """
67 assert self.resolved()
68
69 memory_units = ray_constants.to_memory_units(
70 self.memory, round_up=False)
71 reservable_object_store_memory = (
72 self.object_store_memory *
73 ray_constants.PLASMA_RESERVABLE_MEMORY_FRACTION)
74 if (reservable_object_store_memory <
75 ray_constants.MEMORY_RESOURCE_UNIT_BYTES):
76 raise ValueError(
77 "The minimum amount of object_store_memory that can be "
78 "requested is {}, but you specified {}.".format(
79 int(
80 math.ceil(
81 ray_constants.MEMORY_RESOURCE_UNIT_BYTES /
82 ray_constants.PLASMA_RESERVABLE_MEMORY_FRACTION)),
83 self.object_store_memory))
84 object_store_memory_units = ray_constants.to_memory_units(
85 self.object_store_memory *
86 ray_constants.PLASMA_RESERVABLE_MEMORY_FRACTION,
87 round_up=False)
88
89 resources = dict(
90 self.resources,
91 CPU=self.num_cpus,
92 GPU=self.num_gpus,
93 memory=memory_units,
94 object_store_memory=object_store_memory_units)
95
96 resources = {
97 resource_label: resource_quantity
98 for resource_label, resource_quantity in resources.items()
99 if resource_quantity != 0
100 }
101
102 # Check types.
103 for resource_label, resource_quantity in resources.items():
104 assert (isinstance(resource_quantity, int)
105 or isinstance(resource_quantity, float))
106 if (isinstance(resource_quantity, float)
107 and not resource_quantity.is_integer()):
108 raise ValueError(
109 "Resource quantities must all be whole numbers. "
110 "Violated by resource '{}' in {}.".format(
111 resource_label, resources))
112 if resource_quantity < 0:
113 raise ValueError("Resource quantities must be nonnegative. "
114 "Violated by resource '{}' in {}.".format(
115 resource_label, resources))
116 if resource_quantity > ray_constants.MAX_RESOURCE_QUANTITY:
117 raise ValueError("Resource quantities must be at most {}. "
118 "Violated by resource '{}' in {}.".format(
119 ray_constants.MAX_RESOURCE_QUANTITY,
120 resource_label, resources))
121
122 return resources
123
124 def resolve(self, is_head, node_ip_address=None):
125 """Returns a copy with values filled out with system defaults.
126
127 Args:
128 is_head (bool): Whether this is the head node.
129 node_ip_address (str): The IP address of the node that we are on.
130 This is used to automatically create a node id resource.
131 """
132
133 resources = (self.resources or {}).copy()
134 assert "CPU" not in resources, resources
135 assert "GPU" not in resources, resources
136 assert "memory" not in resources, resources
137 assert "object_store_memory" not in resources, resources
138
139 if node_ip_address is None:
140 node_ip_address = ray.services.get_node_ip_address()
141
142 # Automatically create a node id resource on each node. This is
143 # queryable with ray.state.node_ids() and ray.state.current_node_id().
144 resources[NODE_ID_PREFIX + node_ip_address] = 1.0
145
146 num_cpus = self.num_cpus
147 if num_cpus is None:
148 num_cpus = multiprocessing.cpu_count()
149
150 num_gpus = self.num_gpus
151 gpu_ids = ray.utils.get_cuda_visible_devices()
152 # Check that the number of GPUs that the raylet wants doesn't
153 # excede the amount allowed by CUDA_VISIBLE_DEVICES.
154 if (num_gpus is not None and gpu_ids is not None
155 and num_gpus > len(gpu_ids)):
156 raise ValueError("Attempting to start raylet with {} GPUs, "
157 "but CUDA_VISIBLE_DEVICES contains {}.".format(
158 num_gpus, gpu_ids))
159 if num_gpus is None:
160 # Try to automatically detect the number of GPUs.
161 num_gpus = _autodetect_num_gpus()
162 # Don't use more GPUs than allowed by CUDA_VISIBLE_DEVICES.
163 if gpu_ids is not None:
164 num_gpus = min(num_gpus, len(gpu_ids))
165
166 # Choose a default object store size.
167 system_memory = ray.utils.get_system_memory()
168 avail_memory = ray.utils.estimate_available_memory()
169 object_store_memory = self.object_store_memory
170 if object_store_memory is None:
171 object_store_memory = int(avail_memory * 0.3)
172 # Cap memory to avoid memory waste and perf issues on large nodes
173 if (object_store_memory >
174 ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES):
175 logger.debug(
176 "Warning: Capping object memory store to {}GB. ".format(
177 ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES //
178 1e9) +
179 "To increase this further, specify `object_store_memory` "
180 "when calling ray.init() or ray start.")
181 object_store_memory = (
182 ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES)
183
184 redis_max_memory = self.redis_max_memory
185 if redis_max_memory is None:
186 redis_max_memory = min(
187 ray_constants.DEFAULT_REDIS_MAX_MEMORY_BYTES,
188 max(
189 int(avail_memory * 0.1),
190 ray_constants.REDIS_MINIMUM_MEMORY_BYTES))
191 if redis_max_memory < ray_constants.REDIS_MINIMUM_MEMORY_BYTES:
192 raise ValueError(
193 "Attempting to cap Redis memory usage at {} bytes, "
194 "but the minimum allowed is {} bytes.".format(
195 redis_max_memory,
196 ray_constants.REDIS_MINIMUM_MEMORY_BYTES))
197
198 memory = self.memory
199 if memory is None:
200 memory = (avail_memory - object_store_memory - (redis_max_memory
201 if is_head else 0))
202 if memory < 100e6 and memory < 0.05 * system_memory:
203 raise ValueError(
204 "After taking into account object store and redis memory "
205 "usage, the amount of memory on this node available for "
206 "tasks and actors ({} GB) is less than {}% of total. "
207 "You can adjust these settings with "
208 "ray.init(memory=<bytes>, "
209 "object_store_memory=<bytes>).".format(
210 round(memory / 1e9, 2),
211 int(100 * (memory / system_memory))))
212
213 logger.info(
214 "Starting Ray with {} GiB memory available for workers and up to "
215 "{} GiB for objects. You can adjust these settings "
216 "with ray.init(memory=<bytes>, "
217 "object_store_memory=<bytes>).".format(
218 round(
219 ray_constants.round_to_memory_units(
220 memory, round_up=False) / (1024**3), 2),
221 round(object_store_memory / (1024**3), 2)))
222
223 spec = ResourceSpec(num_cpus, num_gpus, memory, object_store_memory,
224 resources, redis_max_memory)
225 assert spec.resolved()
226 return spec
227
228
229 def _autodetect_num_gpus():
230 """Attempt to detect the number of GPUs on this machine.
231
232 TODO(rkn): This currently assumes Nvidia GPUs and Linux.
233
234 Returns:
235 The number of GPUs if any were detected, otherwise 0.
236 """
237 proc_gpus_path = "/proc/driver/nvidia/gpus"
238 if os.path.isdir(proc_gpus_path):
239 return len(os.listdir(proc_gpus_path))
240 return 0
241
[end of python/ray/resource_spec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/resource_spec.py b/python/ray/resource_spec.py
--- a/python/ray/resource_spec.py
+++ b/python/ray/resource_spec.py
@@ -3,6 +3,8 @@
import logging
import multiprocessing
import os
+import subprocess
+import sys
import ray
import ray.ray_constants as ray_constants
@@ -229,12 +231,23 @@
def _autodetect_num_gpus():
"""Attempt to detect the number of GPUs on this machine.
- TODO(rkn): This currently assumes Nvidia GPUs and Linux.
+ TODO(rkn): This currently assumes NVIDIA GPUs on Linux.
+ TODO(mehrdadn): This currently does not work on macOS.
+ TODO(mehrdadn): Use a better mechanism for Windows.
+
+ Possibly useful: tensorflow.config.list_physical_devices()
Returns:
The number of GPUs if any were detected, otherwise 0.
"""
- proc_gpus_path = "/proc/driver/nvidia/gpus"
- if os.path.isdir(proc_gpus_path):
- return len(os.listdir(proc_gpus_path))
- return 0
+ result = 0
+ if sys.platform.startswith("linux"):
+ proc_gpus_path = "/proc/driver/nvidia/gpus"
+ if os.path.isdir(proc_gpus_path):
+ result = len(os.listdir(proc_gpus_path))
+ elif sys.platform == "win32":
+ props = "AdapterCompatibility"
+ cmdargs = ["WMIC", "PATH", "Win32_VideoController", "GET", props]
+ lines = subprocess.check_output(cmdargs).splitlines()[1:]
+ result = len([l.rstrip() for l in lines if l.startswith(b"NVIDIA")])
+ return result
| {"golden_diff": "diff --git a/python/ray/resource_spec.py b/python/ray/resource_spec.py\n--- a/python/ray/resource_spec.py\n+++ b/python/ray/resource_spec.py\n@@ -3,6 +3,8 @@\n import logging\n import multiprocessing\n import os\n+import subprocess\n+import sys\n \n import ray\n import ray.ray_constants as ray_constants\n@@ -229,12 +231,23 @@\n def _autodetect_num_gpus():\n \"\"\"Attempt to detect the number of GPUs on this machine.\n \n- TODO(rkn): This currently assumes Nvidia GPUs and Linux.\n+ TODO(rkn): This currently assumes NVIDIA GPUs on Linux.\n+ TODO(mehrdadn): This currently does not work on macOS.\n+ TODO(mehrdadn): Use a better mechanism for Windows.\n+\n+ Possibly useful: tensorflow.config.list_physical_devices()\n \n Returns:\n The number of GPUs if any were detected, otherwise 0.\n \"\"\"\n- proc_gpus_path = \"/proc/driver/nvidia/gpus\"\n- if os.path.isdir(proc_gpus_path):\n- return len(os.listdir(proc_gpus_path))\n- return 0\n+ result = 0\n+ if sys.platform.startswith(\"linux\"):\n+ proc_gpus_path = \"/proc/driver/nvidia/gpus\"\n+ if os.path.isdir(proc_gpus_path):\n+ result = len(os.listdir(proc_gpus_path))\n+ elif sys.platform == \"win32\":\n+ props = \"AdapterCompatibility\"\n+ cmdargs = [\"WMIC\", \"PATH\", \"Win32_VideoController\", \"GET\", props]\n+ lines = subprocess.check_output(cmdargs).splitlines()[1:]\n+ result = len([l.rstrip() for l in lines if l.startswith(b\"NVIDIA\")])\n+ return result\n", "issue": "[tune][rllib] Windows: GPU not recognized\n### What is the problem?\r\n\r\nI'm getting `ray.tune.error.TuneError: Insufficient cluster resources to launch trial`.\r\nI specified a GPU in my config but ray does not recognize my GPU (RTX 2080) and throws an error.\r\nI can get passed this by setting `num_gpus: 0` in my config for now.\r\n\r\nhttps://gist.github.com/juliusfrost/fa7ebbb8d1dfc66eea0bbc4babcbe5aa\r\n\r\n### Reproduction (REQUIRED)\r\n\r\n```\r\ngit clone https://github.com/juliusfrost/rllib-tune-atari.git\r\ncd rllib-tune-atari\r\npip install -r requirements.txt\r\npython train.py --algo a2c\r\n```\r\n\r\n- [x] I have verified my script runs in a clean environment and reproduces the issue.\r\n- [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).\r\n\n", "before_files": [{"content": "import math\nfrom collections import namedtuple\nimport logging\nimport multiprocessing\nimport os\n\nimport ray\nimport ray.ray_constants as ray_constants\n\nlogger = logging.getLogger(__name__)\n\n# Prefix for the node id resource that is automatically added to each node.\n# For example, a node may have id `node:172.23.42.1`.\nNODE_ID_PREFIX = \"node:\"\n\n\nclass ResourceSpec(\n namedtuple(\"ResourceSpec\", [\n \"num_cpus\", \"num_gpus\", \"memory\", \"object_store_memory\",\n \"resources\", \"redis_max_memory\"\n ])):\n \"\"\"Represents the resource configuration passed to a raylet.\n\n All fields can be None. 
Before starting services, resolve() should be\n called to return a ResourceSpec with unknown values filled in with\n defaults based on the local machine specifications.\n\n Attributes:\n num_cpus: The CPUs allocated for this raylet.\n num_gpus: The GPUs allocated for this raylet.\n memory: The memory allocated for this raylet.\n object_store_memory: The object store memory allocated for this raylet.\n Note that when calling to_resource_dict(), this will be scaled down\n by 30% to account for the global plasma LRU reserve.\n resources: The custom resources allocated for this raylet.\n redis_max_memory: The max amount of memory (in bytes) to allow each\n redis shard to use. Once the limit is exceeded, redis will start\n LRU eviction of entries. This only applies to the sharded redis\n tables (task, object, and profile tables). By default, this is\n capped at 10GB but can be set higher.\n \"\"\"\n\n def __new__(cls,\n num_cpus=None,\n num_gpus=None,\n memory=None,\n object_store_memory=None,\n resources=None,\n redis_max_memory=None):\n return super(ResourceSpec, cls).__new__(cls, num_cpus, num_gpus,\n memory, object_store_memory,\n resources, redis_max_memory)\n\n def resolved(self):\n \"\"\"Returns if this ResourceSpec has default values filled out.\"\"\"\n for v in self._asdict().values():\n if v is None:\n return False\n return True\n\n def to_resource_dict(self):\n \"\"\"Returns a dict suitable to pass to raylet initialization.\n\n This renames num_cpus / num_gpus to \"CPU\" / \"GPU\", translates memory\n from bytes into 100MB memory units, and checks types.\n \"\"\"\n assert self.resolved()\n\n memory_units = ray_constants.to_memory_units(\n self.memory, round_up=False)\n reservable_object_store_memory = (\n self.object_store_memory *\n ray_constants.PLASMA_RESERVABLE_MEMORY_FRACTION)\n if (reservable_object_store_memory <\n ray_constants.MEMORY_RESOURCE_UNIT_BYTES):\n raise ValueError(\n \"The minimum amount of object_store_memory that can be \"\n \"requested is {}, but you specified {}.\".format(\n int(\n math.ceil(\n ray_constants.MEMORY_RESOURCE_UNIT_BYTES /\n ray_constants.PLASMA_RESERVABLE_MEMORY_FRACTION)),\n self.object_store_memory))\n object_store_memory_units = ray_constants.to_memory_units(\n self.object_store_memory *\n ray_constants.PLASMA_RESERVABLE_MEMORY_FRACTION,\n round_up=False)\n\n resources = dict(\n self.resources,\n CPU=self.num_cpus,\n GPU=self.num_gpus,\n memory=memory_units,\n object_store_memory=object_store_memory_units)\n\n resources = {\n resource_label: resource_quantity\n for resource_label, resource_quantity in resources.items()\n if resource_quantity != 0\n }\n\n # Check types.\n for resource_label, resource_quantity in resources.items():\n assert (isinstance(resource_quantity, int)\n or isinstance(resource_quantity, float))\n if (isinstance(resource_quantity, float)\n and not resource_quantity.is_integer()):\n raise ValueError(\n \"Resource quantities must all be whole numbers. \"\n \"Violated by resource '{}' in {}.\".format(\n resource_label, resources))\n if resource_quantity < 0:\n raise ValueError(\"Resource quantities must be nonnegative. \"\n \"Violated by resource '{}' in {}.\".format(\n resource_label, resources))\n if resource_quantity > ray_constants.MAX_RESOURCE_QUANTITY:\n raise ValueError(\"Resource quantities must be at most {}. 
\"\n \"Violated by resource '{}' in {}.\".format(\n ray_constants.MAX_RESOURCE_QUANTITY,\n resource_label, resources))\n\n return resources\n\n def resolve(self, is_head, node_ip_address=None):\n \"\"\"Returns a copy with values filled out with system defaults.\n\n Args:\n is_head (bool): Whether this is the head node.\n node_ip_address (str): The IP address of the node that we are on.\n This is used to automatically create a node id resource.\n \"\"\"\n\n resources = (self.resources or {}).copy()\n assert \"CPU\" not in resources, resources\n assert \"GPU\" not in resources, resources\n assert \"memory\" not in resources, resources\n assert \"object_store_memory\" not in resources, resources\n\n if node_ip_address is None:\n node_ip_address = ray.services.get_node_ip_address()\n\n # Automatically create a node id resource on each node. This is\n # queryable with ray.state.node_ids() and ray.state.current_node_id().\n resources[NODE_ID_PREFIX + node_ip_address] = 1.0\n\n num_cpus = self.num_cpus\n if num_cpus is None:\n num_cpus = multiprocessing.cpu_count()\n\n num_gpus = self.num_gpus\n gpu_ids = ray.utils.get_cuda_visible_devices()\n # Check that the number of GPUs that the raylet wants doesn't\n # excede the amount allowed by CUDA_VISIBLE_DEVICES.\n if (num_gpus is not None and gpu_ids is not None\n and num_gpus > len(gpu_ids)):\n raise ValueError(\"Attempting to start raylet with {} GPUs, \"\n \"but CUDA_VISIBLE_DEVICES contains {}.\".format(\n num_gpus, gpu_ids))\n if num_gpus is None:\n # Try to automatically detect the number of GPUs.\n num_gpus = _autodetect_num_gpus()\n # Don't use more GPUs than allowed by CUDA_VISIBLE_DEVICES.\n if gpu_ids is not None:\n num_gpus = min(num_gpus, len(gpu_ids))\n\n # Choose a default object store size.\n system_memory = ray.utils.get_system_memory()\n avail_memory = ray.utils.estimate_available_memory()\n object_store_memory = self.object_store_memory\n if object_store_memory is None:\n object_store_memory = int(avail_memory * 0.3)\n # Cap memory to avoid memory waste and perf issues on large nodes\n if (object_store_memory >\n ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES):\n logger.debug(\n \"Warning: Capping object memory store to {}GB. \".format(\n ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES //\n 1e9) +\n \"To increase this further, specify `object_store_memory` \"\n \"when calling ray.init() or ray start.\")\n object_store_memory = (\n ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES)\n\n redis_max_memory = self.redis_max_memory\n if redis_max_memory is None:\n redis_max_memory = min(\n ray_constants.DEFAULT_REDIS_MAX_MEMORY_BYTES,\n max(\n int(avail_memory * 0.1),\n ray_constants.REDIS_MINIMUM_MEMORY_BYTES))\n if redis_max_memory < ray_constants.REDIS_MINIMUM_MEMORY_BYTES:\n raise ValueError(\n \"Attempting to cap Redis memory usage at {} bytes, \"\n \"but the minimum allowed is {} bytes.\".format(\n redis_max_memory,\n ray_constants.REDIS_MINIMUM_MEMORY_BYTES))\n\n memory = self.memory\n if memory is None:\n memory = (avail_memory - object_store_memory - (redis_max_memory\n if is_head else 0))\n if memory < 100e6 and memory < 0.05 * system_memory:\n raise ValueError(\n \"After taking into account object store and redis memory \"\n \"usage, the amount of memory on this node available for \"\n \"tasks and actors ({} GB) is less than {}% of total. 
\"\n \"You can adjust these settings with \"\n \"ray.init(memory=<bytes>, \"\n \"object_store_memory=<bytes>).\".format(\n round(memory / 1e9, 2),\n int(100 * (memory / system_memory))))\n\n logger.info(\n \"Starting Ray with {} GiB memory available for workers and up to \"\n \"{} GiB for objects. You can adjust these settings \"\n \"with ray.init(memory=<bytes>, \"\n \"object_store_memory=<bytes>).\".format(\n round(\n ray_constants.round_to_memory_units(\n memory, round_up=False) / (1024**3), 2),\n round(object_store_memory / (1024**3), 2)))\n\n spec = ResourceSpec(num_cpus, num_gpus, memory, object_store_memory,\n resources, redis_max_memory)\n assert spec.resolved()\n return spec\n\n\ndef _autodetect_num_gpus():\n \"\"\"Attempt to detect the number of GPUs on this machine.\n\n TODO(rkn): This currently assumes Nvidia GPUs and Linux.\n\n Returns:\n The number of GPUs if any were detected, otherwise 0.\n \"\"\"\n proc_gpus_path = \"/proc/driver/nvidia/gpus\"\n if os.path.isdir(proc_gpus_path):\n return len(os.listdir(proc_gpus_path))\n return 0\n", "path": "python/ray/resource_spec.py"}]} | 3,438 | 391 |
gh_patches_debug_30245 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-6569 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Setuptools 60.7.0 breaks the executable
Building with setuptools==60.7.0 I get this error.
There is no `jaraco` module (seems like this is the author of `setuptools`)
```
(simulator_venv) ➜ agent git:(feature/SB-22361-improve-socket-error-handling) ✗ dist/sbsimulator/sbsimulator
Traceback (most recent call last):
File "PyInstaller/hooks/rthooks/pyi_rth_pkgres.py", line 16, in <module>
File "/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 74, in <module>
from pkg_resources.extern.jaraco.text import (
File "/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/extern/__init__.py", line 52, in create_module
return self.load_module(spec.name)
File "/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/extern/__init__.py", line 44, in load_module
raise ImportError(
ImportError: The 'jaraco' package is required; normally this is bundled with this package so if you get this warning, consult the packager of your distribution.
[23421] Failed to execute script 'pyi_rth_pkgres' due to unhandled exception!
```
PyInstaller: 4.8
OS: Linux/Mac (not tested on Windows)
Python: 3.8.10
</issue>
<code>
[start of PyInstaller/hooks/hook-pkg_resources.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2021, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 from PyInstaller.utils.hooks import collect_submodules
13
14 # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose
15 # them as pkg_resources.extern.*
16 hiddenimports = collect_submodules('pkg_resources._vendor')
17
18 # pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if
19 # we would replace this by a fake module.
20 hiddenimports.append('pkg_resources.py2_warn')
21
22 excludedimports = ['__main__']
23
24 # Some more hidden imports. See:
25 # https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be
26 # its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if
27 # present.
28 hiddenimports += collect_submodules('packaging')
29
30 hiddenimports += ['pkg_resources.markers']
31
[end of PyInstaller/hooks/hook-pkg_resources.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py
--- a/PyInstaller/hooks/hook-pkg_resources.py
+++ b/PyInstaller/hooks/hook-pkg_resources.py
@@ -9,7 +9,7 @@
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
-from PyInstaller.utils.hooks import collect_submodules
+from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies
# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose
# them as pkg_resources.extern.*
@@ -28,3 +28,24 @@
hiddenimports += collect_submodules('packaging')
hiddenimports += ['pkg_resources.markers']
+
+# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco
+# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook
+# utilities.
+#
+# In setuptools 60.7.0, the vendored jaraco.text package included "Lorem Ipsum.txt" data file, which also has to be
+# collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's
+# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.
+if is_module_satisfies("setuptools == 60.7.0"):
+ raise SystemExit(
+ "ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. "
+ "Downgrade to an earlier version or upgrade to a later version."
+ )
+# In setuptools 60.7.1, the "Lorem Ipsum.txt" data file was dropped from the vendored jaraco.text package, so we can
+# accommodate it with couple of hidden imports.
+elif is_module_satisfies("setuptools >= 60.7.1"):
+ hiddenimports += [
+ 'pkg_resources._vendor.jaraco.functools',
+ 'pkg_resources._vendor.jaraco.context',
+ 'pkg_resources._vendor.jaraco.text',
+ ]
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py\n--- a/PyInstaller/hooks/hook-pkg_resources.py\n+++ b/PyInstaller/hooks/hook-pkg_resources.py\n@@ -9,7 +9,7 @@\n # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n #-----------------------------------------------------------------------------\n \n-from PyInstaller.utils.hooks import collect_submodules\n+from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies\n \n # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n # them as pkg_resources.extern.*\n@@ -28,3 +28,24 @@\n hiddenimports += collect_submodules('packaging')\n \n hiddenimports += ['pkg_resources.markers']\n+\n+# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n+# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n+# utilities.\n+#\n+# In setuptools 60.7.0, the vendored jaraco.text package included \"Lorem Ipsum.txt\" data file, which also has to be\n+# collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's\n+# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.\n+if is_module_satisfies(\"setuptools == 60.7.0\"):\n+ raise SystemExit(\n+ \"ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. \"\n+ \"Downgrade to an earlier version or upgrade to a later version.\"\n+ )\n+# In setuptools 60.7.1, the \"Lorem Ipsum.txt\" data file was dropped from the vendored jaraco.text package, so we can\n+# accommodate it with couple of hidden imports.\n+elif is_module_satisfies(\"setuptools >= 60.7.1\"):\n+ hiddenimports += [\n+ 'pkg_resources._vendor.jaraco.functools',\n+ 'pkg_resources._vendor.jaraco.context',\n+ 'pkg_resources._vendor.jaraco.text',\n+ ]\n", "issue": "Setuptools 60.7.0 breaks the executable\nBuilding with setuptools==60.7.0 I get this error.\r\nThere is no `jaraco` module (seems like this is the author of `setuptools)\r\n```\r\n(simulator_venv) \u279c agent git:(feature/SB-22361-improve-socket-error-handling) \u2717 dist/sbsimulator/sbsimulator\r\nTraceback (most recent call last):\r\n File \"PyInstaller/hooks/rthooks/pyi_rth_pkgres.py\", line 16, in <module>\r\n File \"/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 74, in <module>\r\n from pkg_resources.extern.jaraco.text import (\r\n File \"/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/extern/__init__.py\", line 52, in create_module\r\n return self.load_module(spec.name)\r\n File \"/Users/arossert/Shared/Bitbucket/agent/simulator_venv/lib/python3.8/site-packages/pkg_resources/extern/__init__.py\", line 44, in load_module\r\n raise ImportError(\r\nImportError: The 'jaraco' package is required; normally this is bundled with this package so if you get this warning, consult the packager of your distribution.\r\n[23421] Failed to execute script 'pyi_rth_pkgres' due to unhandled exception!\r\n```\r\n\r\nPyInstaller: 4.8\r\nOS: Linux/Mac (not tested on Windows)\r\nPython: 3.8.10\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General 
Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_submodules\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n# them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n\n# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n# we would replace this by a fake module.\nhiddenimports.append('pkg_resources.py2_warn')\n\nexcludedimports = ['__main__']\n\n# Some more hidden imports. See:\n# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if\n# present.\nhiddenimports += collect_submodules('packaging')\n\nhiddenimports += ['pkg_resources.markers']\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}]} | 1,266 | 485 |
gh_patches_debug_24447 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4316 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AWS_157 False Positive on Aurora
**Describe the issue**
CKV_AWS_157 fails on Aurora instances even though the `MultiAZ` property is not applicable to Aurora
**Examples**
```
RDSinstance:
Type: AWS::RDS::DBInstance
Properties:
DBClusterIdentifier: !Ref DBCluster
DBInstanceClass: !Ref DbType
DBInstanceIdentifier: !Sub ${AppName}-${EnvironmentName}
DBParameterGroupName: !Ref DbParameterGroup
DBSubnetGroupName: !Ref DBSubnetGroup
Engine: aurora-mysql
MonitoringInterval: "60"
MonitoringRoleArn: !GetAtt RdsMonitoringRole.Arn
PubliclyAccessible: 'false'
```
**Version (please complete the following information):**
- Checkov Version 2.2.270
**Additional context**
- AWS docs: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#cfn-rds-dbinstance-multiaz
<img width="1377" alt="Screenshot 2023-01-19 at 1 25 49 PM" src="https://user-images.githubusercontent.com/1328683/213552592-4736b84a-3926-44b2-9dad-196b68f28daa.png">
</issue>
<code>
[start of checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py]
1 from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
2 from checkov.common.models.enums import CheckCategories
3
4
5 class RDSMultiAZEnabled(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure that RDS instances have Multi-AZ enabled"
8 id = "CKV_AWS_157"
9 supported_resources = ['AWS::RDS::DBInstance']
10 categories = [CheckCategories.NETWORKING]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return 'Properties/MultiAZ'
15
16
17 check = RDSMultiAZEnabled()
18
[end of checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py b/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py
--- a/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py
+++ b/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py
@@ -1,5 +1,5 @@
+from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
-from checkov.common.models.enums import CheckCategories
class RDSMultiAZEnabled(BaseResourceValueCheck):
@@ -10,6 +10,16 @@
categories = [CheckCategories.NETWORKING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+ def scan_resource_conf(self, conf):
+ # Aurora is replicated across all AZs and doesn't require MultiAZ to be set
+ # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#cfn-rds-dbinstance-multiaz
+ if 'Properties' in conf.keys():
+ if 'Engine' in conf['Properties'].keys():
+ if 'aurora' in conf['Properties']['Engine']:
+ return CheckResult.UNKNOWN
+ # Database is not Aurora; Use base class implementation
+ return super().scan_resource_conf(conf)
+
def get_inspected_key(self):
return 'Properties/MultiAZ'
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py b/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py\n--- a/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py\n+++ b/checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py\n@@ -1,5 +1,5 @@\n+from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n \n \n class RDSMultiAZEnabled(BaseResourceValueCheck):\n@@ -10,6 +10,16 @@\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n+ def scan_resource_conf(self, conf):\n+ # Aurora is replicated across all AZs and doesn't require MultiAZ to be set\n+ # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#cfn-rds-dbinstance-multiaz\n+ if 'Properties' in conf.keys():\n+ if 'Engine' in conf['Properties'].keys():\n+ if 'aurora' in conf['Properties']['Engine']:\n+ return CheckResult.UNKNOWN\n+ # Database is not Aurora; Use base class implementation\n+ return super().scan_resource_conf(conf)\n+ \n def get_inspected_key(self):\n return 'Properties/MultiAZ'\n", "issue": "CKV_AWS_157 False Positive on Aurora\n**Describe the issue**\r\nCKV_AWS_157 fails on Aurora instances even though the `MultiAZ` property is not applicable to Aurora\r\n\r\n**Examples**\r\n```\r\nRDSinstance:\r\n Type: AWS::RDS::DBInstance\r\n Properties:\r\n DBClusterIdentifier: !Ref DBCluster\r\n DBInstanceClass: !Ref DbType\r\n DBInstanceIdentifier: !Sub ${AppName}-${EnvironmentName}\r\n DBParameterGroupName: !Ref DbParameterGroup\r\n DBSubnetGroupName: !Ref DBSubnetGroup\r\n Engine: aurora-mysql\r\n MonitoringInterval: \"60\"\r\n MonitoringRoleArn: !GetAtt RdsMonitoringRole.Arn\r\n PubliclyAccessible: 'false'\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.2.270\r\n\r\n**Additional context**\r\n- AWS docs: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#cfn-rds-dbinstance-multiaz\r\n\r\n<img width=\"1377\" alt=\"Screenshot 2023-01-19 at 1 25 49 PM\" src=\"https://user-images.githubusercontent.com/1328683/213552592-4736b84a-3926-44b2-9dad-196b68f28daa.png\">\r\n\n", "before_files": [{"content": "from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass RDSMultiAZEnabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that RDS instances have Multi-AZ enabled\"\n id = \"CKV_AWS_157\"\n supported_resources = ['AWS::RDS::DBInstance']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'Properties/MultiAZ'\n\n\ncheck = RDSMultiAZEnabled()\n", "path": "checkov/cloudformation/checks/resource/aws/RDSMultiAZEnabled.py"}]} | 1,052 | 324 |
gh_patches_debug_32293 | rasdani/github-patches | git_diff | translate__pootle-5675 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move session data into session
atm there is data like sidebar stuff that is stored in the actual cookie.
apart from slowing down normal users (they send all that data in *every* request), bots are accumulating cookie cruft and needlessly posting that at our sites.
if the cookie cruft gets too long it gets truncated and errors out
</issue>
<code>
[start of pootle/core/helpers.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import json
10 from urllib import quote, unquote
11
12 from django.utils import dateformat
13
14
15 SIDEBAR_COOKIE_NAME = 'pootle-browser-sidebar'
16
17
18 def get_sidebar_announcements_context(request, objects):
19 """Return the announcements context for the browser pages sidebar.
20
21 :param request: a :cls:`django.http.HttpRequest` object.
22 :param objects: a tuple of Project, Language and TranslationProject to
23 retrieve the announcements for. Any of those can be
24 missing, but it is recommended for them to be in that exact
25 order.
26 """
27 announcements = []
28 new_cookie_data = {}
29 cookie_data = {}
30
31 if SIDEBAR_COOKIE_NAME in request.COOKIES:
32 json_str = unquote(request.COOKIES[SIDEBAR_COOKIE_NAME])
33 cookie_data = json.loads(json_str)
34
35 is_sidebar_open = cookie_data.get('isOpen', True)
36
37 for item in objects:
38 announcement = item.get_announcement(request.user)
39
40 if announcement is None:
41 continue
42
43 announcements.append(announcement)
44 # The virtual_path cannot be used as is for JSON.
45 ann_key = announcement.virtual_path.replace('/', '_')
46 ann_mtime = dateformat.format(announcement.modified_on, 'U')
47 stored_mtime = cookie_data.get(ann_key, None)
48
49 if ann_mtime != stored_mtime:
50 new_cookie_data[ann_key] = ann_mtime
51
52 if new_cookie_data:
53 # Some announcement has been changed or was never displayed before, so
54 # display sidebar and save the changed mtimes in the cookie to not
55 # display it next time unless it is necessary.
56 is_sidebar_open = True
57 cookie_data.update(new_cookie_data)
58 new_cookie_data = quote(json.dumps(cookie_data))
59
60 ctx = {
61 'announcements': announcements,
62 'is_sidebar_open': is_sidebar_open,
63 'has_sidebar': len(announcements) > 0,
64 }
65
66 return ctx, new_cookie_data
67
[end of pootle/core/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/core/helpers.py b/pootle/core/helpers.py
--- a/pootle/core/helpers.py
+++ b/pootle/core/helpers.py
@@ -12,7 +12,7 @@
from django.utils import dateformat
-SIDEBAR_COOKIE_NAME = 'pootle-browser-sidebar'
+SIDEBAR_COOKIE_NAME = 'pootle-browser-open-sidebar'
def get_sidebar_announcements_context(request, objects):
@@ -24,6 +24,7 @@
missing, but it is recommended for them to be in that exact
order.
"""
+ must_show_announcement = False
announcements = []
new_cookie_data = {}
cookie_data = {}
@@ -41,20 +42,20 @@
continue
announcements.append(announcement)
- # The virtual_path cannot be used as is for JSON.
- ann_key = announcement.virtual_path.replace('/', '_')
+
ann_mtime = dateformat.format(announcement.modified_on, 'U')
- stored_mtime = cookie_data.get(ann_key, None)
+ stored_mtime = request.session.get(announcement.virtual_path, None)
if ann_mtime != stored_mtime:
- new_cookie_data[ann_key] = ann_mtime
+ # Some announcement has been changed or was never displayed before,
+ # so display sidebar and save the changed mtimes in the session to
+ # not display it next time unless it is necessary.
+ must_show_announcement = True
+ request.session[announcement.virtual_path] = ann_mtime
- if new_cookie_data:
- # Some announcement has been changed or was never displayed before, so
- # display sidebar and save the changed mtimes in the cookie to not
- # display it next time unless it is necessary.
+ if must_show_announcement and not is_sidebar_open:
is_sidebar_open = True
- cookie_data.update(new_cookie_data)
+ cookie_data['isOpen'] = is_sidebar_open
new_cookie_data = quote(json.dumps(cookie_data))
ctx = {
| {"golden_diff": "diff --git a/pootle/core/helpers.py b/pootle/core/helpers.py\n--- a/pootle/core/helpers.py\n+++ b/pootle/core/helpers.py\n@@ -12,7 +12,7 @@\n from django.utils import dateformat\n \n \n-SIDEBAR_COOKIE_NAME = 'pootle-browser-sidebar'\n+SIDEBAR_COOKIE_NAME = 'pootle-browser-open-sidebar'\n \n \n def get_sidebar_announcements_context(request, objects):\n@@ -24,6 +24,7 @@\n missing, but it is recommended for them to be in that exact\n order.\n \"\"\"\n+ must_show_announcement = False\n announcements = []\n new_cookie_data = {}\n cookie_data = {}\n@@ -41,20 +42,20 @@\n continue\n \n announcements.append(announcement)\n- # The virtual_path cannot be used as is for JSON.\n- ann_key = announcement.virtual_path.replace('/', '_')\n+\n ann_mtime = dateformat.format(announcement.modified_on, 'U')\n- stored_mtime = cookie_data.get(ann_key, None)\n+ stored_mtime = request.session.get(announcement.virtual_path, None)\n \n if ann_mtime != stored_mtime:\n- new_cookie_data[ann_key] = ann_mtime\n+ # Some announcement has been changed or was never displayed before,\n+ # so display sidebar and save the changed mtimes in the session to\n+ # not display it next time unless it is necessary.\n+ must_show_announcement = True\n+ request.session[announcement.virtual_path] = ann_mtime\n \n- if new_cookie_data:\n- # Some announcement has been changed or was never displayed before, so\n- # display sidebar and save the changed mtimes in the cookie to not\n- # display it next time unless it is necessary.\n+ if must_show_announcement and not is_sidebar_open:\n is_sidebar_open = True\n- cookie_data.update(new_cookie_data)\n+ cookie_data['isOpen'] = is_sidebar_open\n new_cookie_data = quote(json.dumps(cookie_data))\n \n ctx = {\n", "issue": "Move session data into session\natm there is data like sidebar stuff that is stored in the actual cookie.\r\n\r\napart from slowing down normal users (they send all that data in *every* request), bots are acumulating cookie cruft and needlessly posting that at our sites.\r\n\r\nif the cookie cruft gets too long it gets truncated and errors out\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport json\nfrom urllib import quote, unquote\n\nfrom django.utils import dateformat\n\n\nSIDEBAR_COOKIE_NAME = 'pootle-browser-sidebar'\n\n\ndef get_sidebar_announcements_context(request, objects):\n \"\"\"Return the announcements context for the browser pages sidebar.\n\n :param request: a :cls:`django.http.HttpRequest` object.\n :param objects: a tuple of Project, Language and TranslationProject to\n retrieve the announcements for. 
Any of those can be\n missing, but it is recommended for them to be in that exact\n order.\n \"\"\"\n announcements = []\n new_cookie_data = {}\n cookie_data = {}\n\n if SIDEBAR_COOKIE_NAME in request.COOKIES:\n json_str = unquote(request.COOKIES[SIDEBAR_COOKIE_NAME])\n cookie_data = json.loads(json_str)\n\n is_sidebar_open = cookie_data.get('isOpen', True)\n\n for item in objects:\n announcement = item.get_announcement(request.user)\n\n if announcement is None:\n continue\n\n announcements.append(announcement)\n # The virtual_path cannot be used as is for JSON.\n ann_key = announcement.virtual_path.replace('/', '_')\n ann_mtime = dateformat.format(announcement.modified_on, 'U')\n stored_mtime = cookie_data.get(ann_key, None)\n\n if ann_mtime != stored_mtime:\n new_cookie_data[ann_key] = ann_mtime\n\n if new_cookie_data:\n # Some announcement has been changed or was never displayed before, so\n # display sidebar and save the changed mtimes in the cookie to not\n # display it next time unless it is necessary.\n is_sidebar_open = True\n cookie_data.update(new_cookie_data)\n new_cookie_data = quote(json.dumps(cookie_data))\n\n ctx = {\n 'announcements': announcements,\n 'is_sidebar_open': is_sidebar_open,\n 'has_sidebar': len(announcements) > 0,\n }\n\n return ctx, new_cookie_data\n", "path": "pootle/core/helpers.py"}]} | 1,225 | 443 |
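
The diff above keeps only the `isOpen` flag in the cookie and parks the per-announcement mtimes in Django's session store. A minimal sketch of that pattern, assuming a view-style function and announcement objects that expose only `virtual_path` and `modified_on` (the function and object names are invented for illustration, not taken from Pootle):

```python
# Hedged sketch of the cookie-to-session move shown in the diff above.
import json
from urllib.parse import quote

from django.http import HttpResponse

SIDEBAR_COOKIE_NAME = "pootle-browser-open-sidebar"


def sidebar_response(request, announcements):
    must_show = False
    for ann in announcements:
        mtime = str(int(ann.modified_on.timestamp()))
        if request.session.get(ann.virtual_path) != mtime:
            # Unseen or changed announcement: remember it server-side.
            request.session[ann.virtual_path] = mtime
            must_show = True

    response = HttpResponse("ok")
    # Only one small boolean still travels in the cookie on every request.
    response.set_cookie(SIDEBAR_COOKIE_NAME, quote(json.dumps({"isOpen": must_show})))
    return response
```

Because the session lives server-side, browsers and bots only echo back the session id plus this one-boolean cookie, which avoids the cookie growth and truncation the issue describes.
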
gh_patches_debug_27272 | rasdani/github-patches | git_diff | CTFd__CTFd-1001 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sphinx Documentation
CTFd's documentation is lacking and we need a system for it.
</issue>
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/master/config
8
9 # -- Path setup --------------------------------------------------------------
10
11 # If extensions (or modules to document with autodoc) are in another directory,
12 # add these directories to sys.path here. If the directory is relative to the
13 # documentation root, use os.path.abspath to make it absolute, like shown here.
14 #
15 # import os
16 # import sys
17 # sys.path.insert(0, os.path.abspath('.'))
18
19
20 # -- Project information -----------------------------------------------------
21
22 project = u'CTFd'
23 copyright = u'2019, Kevin Chung'
24 author = u'Kevin Chung'
25
26 # The short X.Y version
27 version = u''
28 # The full version, including alpha/beta/rc tags
29 release = u'2.1.0'
30
31
32 # -- General configuration ---------------------------------------------------
33
34 # If your documentation needs a minimal Sphinx version, state it here.
35 #
36 # needs_sphinx = '1.0'
37
38 # Add any Sphinx extension module names here, as strings. They can be
39 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 # ones.
41 extensions = [
42 'sphinx.ext.intersphinx',
43 'sphinx.ext.todo',
44 'sphinx.ext.imgmath',
45 'sphinx.ext.viewcode',
46 ]
47
48 # Add any paths that contain templates here, relative to this directory.
49 templates_path = ['_templates']
50
51 # The suffix(es) of source filenames.
52 # You can specify multiple suffix as a list of string:
53 #
54 # source_suffix = ['.rst', '.md']
55 source_suffix = '.rst'
56
57 # The master toctree document.
58 master_doc = 'index'
59
60 # The language for content autogenerated by Sphinx. Refer to documentation
61 # for a list of supported languages.
62 #
63 # This is also used if you do content translation via gettext catalogs.
64 # Usually you set "language" from the command line for these cases.
65 language = None
66
67 # List of patterns, relative to source directory, that match files and
68 # directories to ignore when looking for source files.
69 # This pattern also affects html_static_path and html_extra_path.
70 exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
71
72 # The name of the Pygments (syntax highlighting) style to use.
73 pygments_style = None
74
75
76 # -- Options for HTML output -------------------------------------------------
77
78 # The theme to use for HTML and HTML Help pages. See the documentation for
79 # a list of builtin themes.
80 #
81 html_theme = 'alabaster'
82
83 # Theme options are theme-specific and customize the look and feel of a theme
84 # further. For a list of options available for each theme, see the
85 # documentation.
86 #
87 # html_theme_options = {}
88
89 # Add any paths that contain custom static files (such as style sheets) here,
90 # relative to this directory. They are copied after the builtin static files,
91 # so a file named "default.css" will overwrite the builtin "default.css".
92 html_static_path = ['_static']
93
94 # Custom sidebar templates, must be a dictionary that maps document names
95 # to template names.
96 #
97 # The default sidebars (for documents that don't match any pattern) are
98 # defined by theme itself. Builtin themes are using these templates by
99 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
100 # 'searchbox.html']``.
101 #
102 # html_sidebars = {}
103
104
105 # -- Options for HTMLHelp output ---------------------------------------------
106
107 # Output file base name for HTML help builder.
108 htmlhelp_basename = 'CTFddoc'
109
110
111 # -- Options for LaTeX output ------------------------------------------------
112
113 latex_elements = {
114 # The paper size ('letterpaper' or 'a4paper').
115 #
116 # 'papersize': 'letterpaper',
117
118 # The font size ('10pt', '11pt' or '12pt').
119 #
120 # 'pointsize': '10pt',
121
122 # Additional stuff for the LaTeX preamble.
123 #
124 # 'preamble': '',
125
126 # Latex figure (float) alignment
127 #
128 # 'figure_align': 'htbp',
129 }
130
131 # Grouping the document tree into LaTeX files. List of tuples
132 # (source start file, target name, title,
133 # author, documentclass [howto, manual, or own class]).
134 latex_documents = [
135 (master_doc, 'CTFd.tex', u'CTFd Documentation',
136 u'Kevin Chung', 'manual'),
137 ]
138
139
140 # -- Options for manual page output ------------------------------------------
141
142 # One entry per manual page. List of tuples
143 # (source start file, name, description, authors, manual section).
144 man_pages = [
145 (master_doc, 'ctfd', u'CTFd Documentation',
146 [author], 1)
147 ]
148
149
150 # -- Options for Texinfo output ----------------------------------------------
151
152 # Grouping the document tree into Texinfo files. List of tuples
153 # (source start file, target name, title, author,
154 # dir menu entry, description, category)
155 texinfo_documents = [
156 (master_doc, 'CTFd', u'CTFd Documentation',
157 author, 'CTFd', 'One line description of project.',
158 'Miscellaneous'),
159 ]
160
161
162 # -- Options for Epub output -------------------------------------------------
163
164 # Bibliographic Dublin Core info.
165 epub_title = project
166
167 # The unique identifier of the text. This can be a ISBN number
168 # or the project homepage.
169 #
170 # epub_identifier = ''
171
172 # A unique identification for the text.
173 #
174 # epub_uid = ''
175
176 # A list of files that should not be packed into the epub file.
177 epub_exclude_files = ['search.html']
178
179
180 # -- Extension configuration -------------------------------------------------
181
182 # -- Options for intersphinx extension ---------------------------------------
183
184 # Example configuration for intersphinx: refer to the Python standard library.
185 intersphinx_mapping = {'https://docs.python.org/': None}
186
187 # -- Options for todo extension ----------------------------------------------
188
189 # If true, `todo` and `todoList` produce output, else they produce nothing.
190 todo_include_todos = True
191
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,13 +20,13 @@
# -- Project information -----------------------------------------------------
project = u'CTFd'
-copyright = u'2019, Kevin Chung'
+copyright = u'2019, CTFd LLC'
author = u'Kevin Chung'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
-release = u'2.1.0'
+release = u'2.1.2'
# -- General configuration ---------------------------------------------------
@@ -78,7 +78,7 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = 'alabaster'
+html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -154,7 +154,7 @@
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CTFd', u'CTFd Documentation',
- author, 'CTFd', 'One line description of project.',
+ author, 'CTFd', 'A Capture The Flag framework focusing on ease of use and customizability.',
'Miscellaneous'),
]
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -20,13 +20,13 @@\n # -- Project information -----------------------------------------------------\n \n project = u'CTFd'\n-copyright = u'2019, Kevin Chung'\n+copyright = u'2019, CTFd LLC'\n author = u'Kevin Chung'\n \n # The short X.Y version\n version = u''\n # The full version, including alpha/beta/rc tags\n-release = u'2.1.0'\n+release = u'2.1.2'\n \n \n # -- General configuration ---------------------------------------------------\n@@ -78,7 +78,7 @@\n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n #\n-html_theme = 'alabaster'\n+html_theme = 'sphinx_rtd_theme'\n \n # Theme options are theme-specific and customize the look and feel of a theme\n # further. For a list of options available for each theme, see the\n@@ -154,7 +154,7 @@\n # dir menu entry, description, category)\n texinfo_documents = [\n (master_doc, 'CTFd', u'CTFd Documentation',\n- author, 'CTFd', 'One line description of project.',\n+ author, 'CTFd', 'A Capture The Flag framework focusing on ease of use and customizability.',\n 'Miscellaneous'),\n ]\n", "issue": "Sphinx Documentation\nCTFds documentation is lacking and we need a system for it. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = u'CTFd'\ncopyright = u'2019, Kevin Chung'\nauthor = u'Kevin Chung'\n\n# The short X.Y version\nversion = u''\n# The full version, including alpha/beta/rc tags\nrelease = u'2.1.0'\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.imgmath',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'CTFddoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'CTFd.tex', u'CTFd Documentation',\n u'Kevin Chung', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'ctfd', u'CTFd Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'CTFd', u'CTFd Documentation',\n author, 'CTFd', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'https://docs.python.org/': None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py"}]} | 2,327 | 319 |
gh_patches_debug_3204 | rasdani/github-patches | git_diff | e-valuation__EvaP-648 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow to exclude courses from grader view
It should be possible to exclude courses from the semester overview on the grading page.
Single results should never be listed on the page and there should be a new attribute for each course defining whether the course is shown for graders or not.
This must also include a symbol for staff users, showing them that this course can't get grade documents.
</issue>
<code>
[start of evap/grades/views.py]
1 from django.shortcuts import get_object_or_404, render, redirect
2 from django.db.models import Prefetch
3 from django.contrib import messages
4 from django.utils.translation import ugettext as _
5 from django.http import HttpResponseBadRequest, HttpResponseForbidden
6
7 from sendfile import sendfile
8
9 from evap.evaluation.auth import grade_publisher_required, grade_downloader_required, grade_publisher_or_staff_required
10 from evap.evaluation.models import Semester, Contribution, Course
11 from evap.grades.models import GradeDocument
12 from evap.grades.forms import GradeDocumentForm
13 from evap.evaluation.tools import send_publish_notifications
14
15
16 @grade_publisher_required
17 def index(request):
18 template_data = dict(
19 semesters=Semester.objects.all()
20 )
21 return render(request, "grades_index.html", template_data)
22
23
24 def prefetch_data(courses):
25 courses = courses.prefetch_related(
26 Prefetch("contributions", queryset=Contribution.objects.filter(responsible=True).select_related("contributor"), to_attr="responsible_contribution"),
27 "degrees")
28
29 course_data = []
30 for course in courses:
31 course.responsible_contributor = course.responsible_contribution[0].contributor
32 course_data.append((
33 course,
34 GradeDocument.objects.filter(course=course, type=GradeDocument.MIDTERM_GRADES).count(),
35 GradeDocument.objects.filter(course=course, type=GradeDocument.FINAL_GRADES).count()
36 ))
37
38 return course_data
39
40
41 @grade_publisher_required
42 def semester_view(request, semester_id):
43 semester = get_object_or_404(Semester, id=semester_id)
44
45 courses = semester.course_set.exclude(state='new')
46 courses = prefetch_data(courses)
47
48 template_data = dict(
49 semester=semester,
50 courses=courses,
51 disable_if_archived="disabled=disabled" if semester.is_archived else "",
52 disable_breadcrumb_semester=True,
53 )
54 return render(request, "grades_semester_view.html", template_data)
55
56
57 @grade_publisher_or_staff_required
58 def course_view(request, semester_id, course_id):
59 semester = get_object_or_404(Semester, id=semester_id)
60 course = get_object_or_404(Course, id=course_id)
61 is_grade_publisher = request.user.is_grade_publisher
62
63 template_data = dict(
64 semester=semester,
65 course=course,
66 grade_documents=course.grade_documents.all(),
67 disable_if_archived="disabled=disabled" if semester.is_archived else "",
68 disable_breadcrumb_course=True,
69 is_grade_publisher=is_grade_publisher,
70 )
71 return render(request, "grades_course_view.html", template_data)
72
73
74 @grade_publisher_required
75 def upload_grades(request, semester_id, course_id):
76 semester = get_object_or_404(Semester, id=semester_id)
77 course = get_object_or_404(Course, id=course_id)
78
79 final_grades = request.GET.get('final', 'false') # default: midterm grades
80 final_grades = {'true': True, 'false': False}.get(final_grades.lower()) # convert parameter to boolean
81
82 form = GradeDocumentForm(request.POST or None, request.FILES or None, course=course, final_grades=final_grades, user=request.user)
83
84 if form.is_valid():
85 form.save()
86 if final_grades and course.state == 'reviewed':
87 course.publish()
88 course.save()
89 send_publish_notifications(grade_document_courses=[course], evaluation_results_courses=[course])
90 else:
91 send_publish_notifications(grade_document_courses=[course])
92
93 messages.success(request, _("Successfully uploaded grades."))
94 return redirect('grades:course_view', semester.id, course.id)
95 else:
96 template_data = dict(
97 semester=semester,
98 course=course,
99 form=form,
100 final_grades=final_grades,
101 show_automated_publishing_info=final_grades,
102 )
103 return render(request, "grades_upload_form.html", template_data)
104
105
106 @grade_publisher_required
107 def toggle_no_grades(request, semester_id, course_id):
108 semester = get_object_or_404(Semester, id=semester_id)
109 course = get_object_or_404(Course, id=course_id)
110
111 if request.method == 'POST':
112 course.gets_no_grade_documents = not course.gets_no_grade_documents
113 course.save()
114
115 if course.gets_no_grade_documents:
116 if course.state == 'reviewed':
117 course.publish()
118 course.save()
119 send_publish_notifications(evaluation_results_courses=[course])
120 messages.success(request, _("Successfully confirmed that no grade documents will be provided."))
121 else:
122 messages.success(request, _("Successfully confirmed that grade documents will be provided later on."))
123 return redirect('grades:semester_view', semester_id)
124 else:
125 template_data = dict(
126 semester=semester,
127 course=course,
128 )
129 return render(request, "toggle_no_grades.html", template_data)
130
131
132 @grade_downloader_required
133 def download_grades(request, grade_document_id):
134 if not request.method == "GET":
135 return HttpResponseBadRequest()
136
137 grade_document = get_object_or_404(GradeDocument, id=grade_document_id)
138 return sendfile(request, grade_document.file.path, attachment=True, attachment_filename=grade_document.filename())
139
140
141 @grade_publisher_required
142 def edit_grades(request, semester_id, course_id, grade_document_id):
143 semester = get_object_or_404(Semester, id=semester_id)
144 course = get_object_or_404(Course, id=course_id)
145 grade_document = get_object_or_404(GradeDocument, id=grade_document_id)
146 final_grades = grade_document.type == GradeDocument.FINAL_GRADES
147
148 form = GradeDocumentForm(request.POST or None, request.FILES or None, course=course, final_grades=final_grades, instance=grade_document, user=request.user)
149
150 if form.is_valid():
151 form.save()
152 messages.success(request, _("Successfully updated grades."))
153 return redirect('grades:course_view', semester.id, course.id)
154 else:
155 template_data = dict(
156 semester=semester,
157 course=course,
158 form=form,
159 show_automated_publishing_info=False,
160 )
161 return render(request, "grades_upload_form.html", template_data)
162
163
164 @grade_publisher_required
165 def delete_grades(request, semester_id, course_id, grade_document_id):
166 semester = get_object_or_404(Semester, id=semester_id)
167 course = get_object_or_404(Course, id=course_id)
168 grade_document = get_object_or_404(GradeDocument, id=grade_document_id)
169
170 if request.method == 'POST':
171 grade_document.delete()
172 messages.success(request, _("Successfully deleted grade document."))
173 return redirect('grades:course_view', semester_id, course_id)
174 else:
175 template_data = dict(
176 semester=semester,
177 course=course,
178 grade_document=grade_document,
179 )
180 return render(request, "grades_delete.html", template_data)
181
[end of evap/grades/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/grades/views.py b/evap/grades/views.py
--- a/evap/grades/views.py
+++ b/evap/grades/views.py
@@ -42,7 +42,7 @@
def semester_view(request, semester_id):
semester = get_object_or_404(Semester, id=semester_id)
- courses = semester.course_set.exclude(state='new')
+ courses = semester.course_set.filter(is_graded=True).exclude(state='new')
courses = prefetch_data(courses)
template_data = dict(
| {"golden_diff": "diff --git a/evap/grades/views.py b/evap/grades/views.py\n--- a/evap/grades/views.py\n+++ b/evap/grades/views.py\n@@ -42,7 +42,7 @@\n def semester_view(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n \n- courses = semester.course_set.exclude(state='new')\n+ courses = semester.course_set.filter(is_graded=True).exclude(state='new')\n courses = prefetch_data(courses)\n \n template_data = dict(\n", "issue": "Allow to exclude courses from grader view\nIt should be possible to exclude courses from the semester overview on the grading page.\nSingle results should never be listed on the page and there should be a new attribute for each course defining whether the course is shown for graders or not.\nThis must also include a symbol for staff users, showing them that this course can't get grade documents.\n\n", "before_files": [{"content": "from django.shortcuts import get_object_or_404, render, redirect\nfrom django.db.models import Prefetch\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext as _\nfrom django.http import HttpResponseBadRequest, HttpResponseForbidden\n\nfrom sendfile import sendfile\n\nfrom evap.evaluation.auth import grade_publisher_required, grade_downloader_required, grade_publisher_or_staff_required\nfrom evap.evaluation.models import Semester, Contribution, Course\nfrom evap.grades.models import GradeDocument\nfrom evap.grades.forms import GradeDocumentForm\nfrom evap.evaluation.tools import send_publish_notifications\n\n\n@grade_publisher_required\ndef index(request):\n template_data = dict(\n semesters=Semester.objects.all()\n )\n return render(request, \"grades_index.html\", template_data)\n\n\ndef prefetch_data(courses):\n courses = courses.prefetch_related(\n Prefetch(\"contributions\", queryset=Contribution.objects.filter(responsible=True).select_related(\"contributor\"), to_attr=\"responsible_contribution\"),\n \"degrees\")\n\n course_data = []\n for course in courses:\n course.responsible_contributor = course.responsible_contribution[0].contributor\n course_data.append((\n course,\n GradeDocument.objects.filter(course=course, type=GradeDocument.MIDTERM_GRADES).count(),\n GradeDocument.objects.filter(course=course, type=GradeDocument.FINAL_GRADES).count()\n ))\n\n return course_data\n\n\n@grade_publisher_required\ndef semester_view(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n\n courses = semester.course_set.exclude(state='new')\n courses = prefetch_data(courses)\n\n template_data = dict(\n semester=semester,\n courses=courses,\n disable_if_archived=\"disabled=disabled\" if semester.is_archived else \"\",\n disable_breadcrumb_semester=True,\n )\n return render(request, \"grades_semester_view.html\", template_data)\n\n\n@grade_publisher_or_staff_required\ndef course_view(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(Course, id=course_id)\n is_grade_publisher = request.user.is_grade_publisher\n\n template_data = dict(\n semester=semester,\n course=course,\n grade_documents=course.grade_documents.all(),\n disable_if_archived=\"disabled=disabled\" if semester.is_archived else \"\",\n disable_breadcrumb_course=True,\n is_grade_publisher=is_grade_publisher,\n )\n return render(request, \"grades_course_view.html\", template_data)\n\n\n@grade_publisher_required\ndef upload_grades(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = 
get_object_or_404(Course, id=course_id)\n\n final_grades = request.GET.get('final', 'false') # default: midterm grades\n final_grades = {'true': True, 'false': False}.get(final_grades.lower()) # convert parameter to boolean\n\n form = GradeDocumentForm(request.POST or None, request.FILES or None, course=course, final_grades=final_grades, user=request.user)\n\n if form.is_valid():\n form.save()\n if final_grades and course.state == 'reviewed':\n course.publish()\n course.save()\n send_publish_notifications(grade_document_courses=[course], evaluation_results_courses=[course])\n else:\n send_publish_notifications(grade_document_courses=[course]) \n\n messages.success(request, _(\"Successfully uploaded grades.\"))\n return redirect('grades:course_view', semester.id, course.id)\n else:\n template_data = dict(\n semester=semester,\n course=course,\n form=form,\n final_grades=final_grades,\n show_automated_publishing_info=final_grades,\n )\n return render(request, \"grades_upload_form.html\", template_data)\n\n\n@grade_publisher_required\ndef toggle_no_grades(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(Course, id=course_id)\n\n if request.method == 'POST':\n course.gets_no_grade_documents = not course.gets_no_grade_documents\n course.save()\n \n if course.gets_no_grade_documents:\n if course.state == 'reviewed':\n course.publish()\n course.save()\n send_publish_notifications(evaluation_results_courses=[course])\n messages.success(request, _(\"Successfully confirmed that no grade documents will be provided.\"))\n else:\n messages.success(request, _(\"Successfully confirmed that grade documents will be provided later on.\"))\n return redirect('grades:semester_view', semester_id)\n else:\n template_data = dict(\n semester=semester,\n course=course,\n )\n return render(request, \"toggle_no_grades.html\", template_data)\n\n\n@grade_downloader_required\ndef download_grades(request, grade_document_id):\n if not request.method == \"GET\":\n return HttpResponseBadRequest()\n\n grade_document = get_object_or_404(GradeDocument, id=grade_document_id)\n return sendfile(request, grade_document.file.path, attachment=True, attachment_filename=grade_document.filename())\n\n\n@grade_publisher_required\ndef edit_grades(request, semester_id, course_id, grade_document_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(Course, id=course_id)\n grade_document = get_object_or_404(GradeDocument, id=grade_document_id)\n final_grades = grade_document.type == GradeDocument.FINAL_GRADES\n\n form = GradeDocumentForm(request.POST or None, request.FILES or None, course=course, final_grades=final_grades, instance=grade_document, user=request.user)\n\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Successfully updated grades.\"))\n return redirect('grades:course_view', semester.id, course.id)\n else:\n template_data = dict(\n semester=semester,\n course=course,\n form=form,\n show_automated_publishing_info=False,\n )\n return render(request, \"grades_upload_form.html\", template_data)\n\n\n@grade_publisher_required\ndef delete_grades(request, semester_id, course_id, grade_document_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(Course, id=course_id)\n grade_document = get_object_or_404(GradeDocument, id=grade_document_id)\n\n if request.method == 'POST':\n grade_document.delete()\n messages.success(request, _(\"Successfully deleted grade document.\"))\n 
return redirect('grades:course_view', semester_id, course_id)\n else:\n template_data = dict(\n semester=semester,\n course=course,\n grade_document=grade_document,\n )\n return render(request, \"grades_delete.html\", template_data)\n", "path": "evap/grades/views.py"}]} | 2,566 | 125 |
gh_patches_debug_42112 | rasdani/github-patches | git_diff | svthalia__concrexit-3351 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handle invalid filter values
### Describe the bug
https://thalia.sentry.io/issues/4448548023/?project=1463433&query=is%3Aunresolved+issue.category%3Aerror&referrer=issue-stream&stream_index=8
When passing e.g. "foo" to a boolean filter in some API views, you get 500 Internal Server Error, not 400 Bad Request.
### How to reproduce
Steps to reproduce the behaviour:
1. Go to `https://staging.thalia.nu/api/v2/admin/events/183/registrations/?queued=all`
### Expected behaviour
400
_Note that there are more such filters. Let's fix them all, not just this single one._
</issue>
<code>
[start of website/payments/api/v2/filters.py]
1 from rest_framework import filters
2
3 from utils.snippets import extract_date_range, strtobool
4
5
6 class CreatedAtFilter(filters.BaseFilterBackend):
7 """Allows you to filter by payment creation dates."""
8
9 def filter_queryset(self, request, queryset, view):
10 start, end = extract_date_range(request, allow_empty=True)
11
12 if start is not None:
13 queryset = queryset.filter(created_at__gte=start)
14 if end is not None:
15 queryset = queryset.filter(created_at__lte=end)
16
17 return queryset
18
19 def get_schema_operation_parameters(self, view):
20 return [
21 {
22 "name": "start",
23 "required": False,
24 "in": "query",
25 "description": "Filter payments by ISO date, starting with this parameter (i.e. 2021-03-30T04:20:00) where `payment.created_at >= value`",
26 "schema": {
27 "type": "string",
28 },
29 },
30 {
31 "name": "end",
32 "required": False,
33 "in": "query",
34 "description": "Filter payments by ISO date, ending with this parameter (i.e. 2021-05-16T13:37:00) where `payment.created_at <= value`",
35 "schema": {
36 "type": "string",
37 },
38 },
39 ]
40
41
42 class PaymentTypeFilter(filters.BaseFilterBackend):
43 """Allows you to filter by payment type."""
44
45 def filter_queryset(self, request, queryset, view):
46 payment_type = request.query_params.get("type", None)
47
48 if payment_type:
49 queryset = queryset.filter(type__in=payment_type.split(","))
50
51 return queryset
52
53 def get_schema_operation_parameters(self, view):
54 return [
55 {
56 "name": "type",
57 "required": False,
58 "in": "query",
59 "description": "Filter by payment type, accepts a comma separated list",
60 "schema": {
61 "type": "string",
62 },
63 }
64 ]
65
66
67 class PaymentSettledFilter(filters.BaseFilterBackend):
68 """Allows you to filter by settled status."""
69
70 def filter_queryset(self, request, queryset, view):
71 settled = request.query_params.get("settled", None)
72
73 if settled is None:
74 return queryset
75
76 if strtobool(settled):
77 return queryset.filter(batch__processed=True)
78
79 return queryset.exclude(batch__processed=True)
80
81 def get_schema_operation_parameters(self, view):
82 return [
83 {
84 "name": "settled",
85 "required": False,
86 "in": "query",
87 "description": "Filter by settled status",
88 "schema": {
89 "type": "boolean",
90 },
91 }
92 ]
93
[end of website/payments/api/v2/filters.py]
[start of website/sales/api/v2/filters.py]
1 from rest_framework import filters
2
3 from utils.snippets import extract_date_range, strtobool
4
5
6 class ShiftActiveFilter(filters.BaseFilterBackend):
7 """Allows you to filter by active status."""
8
9 def filter_queryset(self, request, queryset, view):
10 active = request.query_params.get("active", None)
11
12 if active is not None:
13 queryset = queryset.filter(active=strtobool(active))
14
15 return queryset
16
17 def get_schema_operation_parameters(self, view):
18 return [
19 {
20 "name": "active",
21 "required": False,
22 "in": "query",
23 "description": "Filter by active status",
24 "schema": {
25 "type": "boolean",
26 },
27 }
28 ]
29
30
31 class ShiftLockedFilter(filters.BaseFilterBackend):
32 """Allows you to filter by locked status."""
33
34 def filter_queryset(self, request, queryset, view):
35 locked = request.query_params.get("locked", None)
36
37 if locked is not None:
38 queryset = queryset.filter(locked=strtobool(locked))
39
40 return queryset
41
42 def get_schema_operation_parameters(self, view):
43 return [
44 {
45 "name": "locked",
46 "required": False,
47 "in": "query",
48 "description": "Filter by locked status",
49 "schema": {
50 "type": "boolean",
51 },
52 }
53 ]
54
55
56 class ShiftDateFilter(filters.BaseFilterBackend):
57 """Allows you to filter by event start/end dates."""
58
59 def filter_queryset(self, request, queryset, view):
60 start, end = extract_date_range(request, allow_empty=True)
61
62 if start is not None:
63 queryset = queryset.filter(end__gte=start)
64 if end is not None:
65 queryset = queryset.filter(start__lte=end)
66
67 return queryset
68
69 def get_schema_operation_parameters(self, view):
70 return [
71 {
72 "name": "start",
73 "required": False,
74 "in": "query",
75 "description": "Filter shifts by ISO date, starting with this parameter (i.e. 2021-03-30T04:20:00) where `event.end >= value`",
76 "schema": {
77 "type": "string",
78 },
79 },
80 {
81 "name": "end",
82 "required": False,
83 "in": "query",
84 "description": "Filter shifts by ISO date, ending with this parameter (i.e. 2021-05-16T13:37:00) where `event.start <= value`",
85 "schema": {
86 "type": "string",
87 },
88 },
89 ]
90
[end of website/sales/api/v2/filters.py]
[start of website/events/api/v2/admin/filters.py]
1 from rest_framework import filters
2
3 from utils.snippets import strtobool
4
5
6 class PublishedFilter(filters.BaseFilterBackend):
7 """Allows you to filter by published status."""
8
9 def filter_queryset(self, request, queryset, view):
10 published = request.query_params.get("published", None)
11
12 if published is not None:
13 queryset = queryset.filter(published=strtobool(published))
14
15 return queryset
16
17 def get_schema_operation_parameters(self, view):
18 return [
19 {
20 "name": "published",
21 "required": False,
22 "in": "query",
23 "description": "Filter by published status",
24 "schema": {
25 "type": "boolean",
26 },
27 }
28 ]
29
30
31 class EventRegistrationCancelledFilter(filters.BaseFilterBackend):
32 """Allows you to filter by event registration cancellation status."""
33
34 def filter_queryset(self, request, queryset, view):
35 cancelled = request.query_params.get("cancelled", None)
36
37 if cancelled is None:
38 return queryset
39
40 if strtobool(cancelled):
41 return queryset.exclude(date_cancelled=None)
42
43 return queryset.filter(date_cancelled=None)
44
45 def get_schema_operation_parameters(self, view):
46 return [
47 {
48 "name": "cancelled",
49 "required": False,
50 "in": "query",
51 "description": "Filter by event registration cancellation status",
52 "schema": {
53 "type": "boolean",
54 },
55 }
56 ]
57
58
59 class EventRegistrationQueuedFilter(filters.BaseFilterBackend):
60 """Allows you to filter by event registration by if they're in the queue."""
61
62 def filter_queryset(self, request, queryset, view):
63 queued = request.query_params.get("queued", None)
64
65 if queued is None:
66 return queryset
67
68 if strtobool(queued):
69 return queryset.exclude(queue_position=None)
70
71 return queryset.filter(queue_position=None)
72
73 def get_schema_operation_parameters(self, view):
74 return [
75 {
76 "name": "queued",
77 "required": False,
78 "in": "query",
79 "description": "Filter by event registration queue position",
80 "schema": {
81 "type": "boolean",
82 },
83 }
84 ]
85
[end of website/events/api/v2/admin/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/api/v2/admin/filters.py b/website/events/api/v2/admin/filters.py
--- a/website/events/api/v2/admin/filters.py
+++ b/website/events/api/v2/admin/filters.py
@@ -1,4 +1,5 @@
from rest_framework import filters
+from rest_framework.exceptions import ValidationError
from utils.snippets import strtobool
@@ -10,7 +11,10 @@
published = request.query_params.get("published", None)
if published is not None:
- queryset = queryset.filter(published=strtobool(published))
+ try:
+ queryset = queryset.filter(published=strtobool(published))
+ except ValueError as e:
+ raise ValidationError({"published": "Invalid filter value."}) from e
return queryset
@@ -37,8 +41,11 @@
if cancelled is None:
return queryset
- if strtobool(cancelled):
- return queryset.exclude(date_cancelled=None)
+ try:
+ if strtobool(cancelled):
+ return queryset.exclude(date_cancelled=None)
+ except ValueError as e:
+ raise ValidationError({"cancelled": "Invalid filter value."}) from e
return queryset.filter(date_cancelled=None)
@@ -65,8 +72,11 @@
if queued is None:
return queryset
- if strtobool(queued):
- return queryset.exclude(queue_position=None)
+ try:
+ if strtobool(queued):
+ return queryset.exclude(queue_position=None)
+ except ValueError as e:
+ raise ValidationError({"queued": "Invalid filter value."}) from e
return queryset.filter(queue_position=None)
diff --git a/website/payments/api/v2/filters.py b/website/payments/api/v2/filters.py
--- a/website/payments/api/v2/filters.py
+++ b/website/payments/api/v2/filters.py
@@ -1,4 +1,5 @@
from rest_framework import filters
+from rest_framework.exceptions import ValidationError
from utils.snippets import extract_date_range, strtobool
@@ -73,8 +74,11 @@
if settled is None:
return queryset
- if strtobool(settled):
- return queryset.filter(batch__processed=True)
+ try:
+ if strtobool(settled):
+ return queryset.filter(batch__processed=True)
+ except ValueError as e:
+ raise ValidationError({"settled": "Invalid filter value."}) from e
return queryset.exclude(batch__processed=True)
diff --git a/website/sales/api/v2/filters.py b/website/sales/api/v2/filters.py
--- a/website/sales/api/v2/filters.py
+++ b/website/sales/api/v2/filters.py
@@ -1,4 +1,5 @@
from rest_framework import filters
+from rest_framework.exceptions import ValidationError
from utils.snippets import extract_date_range, strtobool
@@ -10,7 +11,10 @@
active = request.query_params.get("active", None)
if active is not None:
- queryset = queryset.filter(active=strtobool(active))
+ try:
+ queryset = queryset.filter(active=strtobool(active))
+ except ValueError as e:
+ raise ValidationError({"active": "Invalid filter value."}) from e
return queryset
@@ -35,7 +39,10 @@
locked = request.query_params.get("locked", None)
if locked is not None:
- queryset = queryset.filter(locked=strtobool(locked))
+ try:
+ queryset = queryset.filter(locked=strtobool(locked))
+ except ValueError as e:
+ raise ValidationError({"locked": "Invalid filter value."}) from e
return queryset
| {"golden_diff": "diff --git a/website/events/api/v2/admin/filters.py b/website/events/api/v2/admin/filters.py\n--- a/website/events/api/v2/admin/filters.py\n+++ b/website/events/api/v2/admin/filters.py\n@@ -1,4 +1,5 @@\n from rest_framework import filters\n+from rest_framework.exceptions import ValidationError\n \n from utils.snippets import strtobool\n \n@@ -10,7 +11,10 @@\n published = request.query_params.get(\"published\", None)\n \n if published is not None:\n- queryset = queryset.filter(published=strtobool(published))\n+ try:\n+ queryset = queryset.filter(published=strtobool(published))\n+ except ValueError as e:\n+ raise ValidationError({\"published\": \"Invalid filter value.\"}) from e\n \n return queryset\n \n@@ -37,8 +41,11 @@\n if cancelled is None:\n return queryset\n \n- if strtobool(cancelled):\n- return queryset.exclude(date_cancelled=None)\n+ try:\n+ if strtobool(cancelled):\n+ return queryset.exclude(date_cancelled=None)\n+ except ValueError as e:\n+ raise ValidationError({\"cancelled\": \"Invalid filter value.\"}) from e\n \n return queryset.filter(date_cancelled=None)\n \n@@ -65,8 +72,11 @@\n if queued is None:\n return queryset\n \n- if strtobool(queued):\n- return queryset.exclude(queue_position=None)\n+ try:\n+ if strtobool(queued):\n+ return queryset.exclude(queue_position=None)\n+ except ValueError as e:\n+ raise ValidationError({\"queued\": \"Invalid filter value.\"}) from e\n \n return queryset.filter(queue_position=None)\n \ndiff --git a/website/payments/api/v2/filters.py b/website/payments/api/v2/filters.py\n--- a/website/payments/api/v2/filters.py\n+++ b/website/payments/api/v2/filters.py\n@@ -1,4 +1,5 @@\n from rest_framework import filters\n+from rest_framework.exceptions import ValidationError\n \n from utils.snippets import extract_date_range, strtobool\n \n@@ -73,8 +74,11 @@\n if settled is None:\n return queryset\n \n- if strtobool(settled):\n- return queryset.filter(batch__processed=True)\n+ try:\n+ if strtobool(settled):\n+ return queryset.filter(batch__processed=True)\n+ except ValueError as e:\n+ raise ValidationError({\"settled\": \"Invalid filter value.\"}) from e\n \n return queryset.exclude(batch__processed=True)\n \ndiff --git a/website/sales/api/v2/filters.py b/website/sales/api/v2/filters.py\n--- a/website/sales/api/v2/filters.py\n+++ b/website/sales/api/v2/filters.py\n@@ -1,4 +1,5 @@\n from rest_framework import filters\n+from rest_framework.exceptions import ValidationError\n \n from utils.snippets import extract_date_range, strtobool\n \n@@ -10,7 +11,10 @@\n active = request.query_params.get(\"active\", None)\n \n if active is not None:\n- queryset = queryset.filter(active=strtobool(active))\n+ try:\n+ queryset = queryset.filter(active=strtobool(active))\n+ except ValueError as e:\n+ raise ValidationError({\"active\": \"Invalid filter value.\"}) from e\n \n return queryset\n \n@@ -35,7 +39,10 @@\n locked = request.query_params.get(\"locked\", None)\n \n if locked is not None:\n- queryset = queryset.filter(locked=strtobool(locked))\n+ try:\n+ queryset = queryset.filter(locked=strtobool(locked))\n+ except ValueError as e:\n+ raise ValidationError({\"locked\": \"Invalid filter value.\"}) from e\n \n return queryset\n", "issue": "Handle invalid filter values\n### Describe the bug\r\nhttps://thalia.sentry.io/issues/4448548023/?project=1463433&query=is%3Aunresolved+issue.category%3Aerror&referrer=issue-stream&stream_index=8\r\n\r\nWhen passing e.g. 
\"foo\" to a boolean filter in some API views, you get 500 Internal Server Error, not 400 Bad Request.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to `https://staging.thalia.nu/api/v2/admin/events/183/registrations/?queued=all`\r\n\r\n### Expected behaviour\r\n400\r\n\r\n\r\n_Note that there are more such filters. Let's fix them all, not just this single one._\n", "before_files": [{"content": "from rest_framework import filters\n\nfrom utils.snippets import extract_date_range, strtobool\n\n\nclass CreatedAtFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by payment creation dates.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n start, end = extract_date_range(request, allow_empty=True)\n\n if start is not None:\n queryset = queryset.filter(created_at__gte=start)\n if end is not None:\n queryset = queryset.filter(created_at__lte=end)\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"start\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter payments by ISO date, starting with this parameter (i.e. 2021-03-30T04:20:00) where `payment.created_at >= value`\",\n \"schema\": {\n \"type\": \"string\",\n },\n },\n {\n \"name\": \"end\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter payments by ISO date, ending with this parameter (i.e. 2021-05-16T13:37:00) where `payment.created_at <= value`\",\n \"schema\": {\n \"type\": \"string\",\n },\n },\n ]\n\n\nclass PaymentTypeFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by payment type.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n payment_type = request.query_params.get(\"type\", None)\n\n if payment_type:\n queryset = queryset.filter(type__in=payment_type.split(\",\"))\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"type\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by payment type, accepts a comma separated list\",\n \"schema\": {\n \"type\": \"string\",\n },\n }\n ]\n\n\nclass PaymentSettledFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by settled status.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n settled = request.query_params.get(\"settled\", None)\n\n if settled is None:\n return queryset\n\n if strtobool(settled):\n return queryset.filter(batch__processed=True)\n\n return queryset.exclude(batch__processed=True)\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"settled\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by settled status\",\n \"schema\": {\n \"type\": \"boolean\",\n },\n }\n ]\n", "path": "website/payments/api/v2/filters.py"}, {"content": "from rest_framework import filters\n\nfrom utils.snippets import extract_date_range, strtobool\n\n\nclass ShiftActiveFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by active status.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n active = request.query_params.get(\"active\", None)\n\n if active is not None:\n queryset = queryset.filter(active=strtobool(active))\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"active\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by active status\",\n \"schema\": {\n \"type\": \"boolean\",\n },\n }\n ]\n\n\nclass ShiftLockedFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to 
filter by locked status.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n locked = request.query_params.get(\"locked\", None)\n\n if locked is not None:\n queryset = queryset.filter(locked=strtobool(locked))\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"locked\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by locked status\",\n \"schema\": {\n \"type\": \"boolean\",\n },\n }\n ]\n\n\nclass ShiftDateFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by event start/end dates.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n start, end = extract_date_range(request, allow_empty=True)\n\n if start is not None:\n queryset = queryset.filter(end__gte=start)\n if end is not None:\n queryset = queryset.filter(start__lte=end)\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"start\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter shifts by ISO date, starting with this parameter (i.e. 2021-03-30T04:20:00) where `event.end >= value`\",\n \"schema\": {\n \"type\": \"string\",\n },\n },\n {\n \"name\": \"end\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter shifts by ISO date, ending with this parameter (i.e. 2021-05-16T13:37:00) where `event.start <= value`\",\n \"schema\": {\n \"type\": \"string\",\n },\n },\n ]\n", "path": "website/sales/api/v2/filters.py"}, {"content": "from rest_framework import filters\n\nfrom utils.snippets import strtobool\n\n\nclass PublishedFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by published status.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n published = request.query_params.get(\"published\", None)\n\n if published is not None:\n queryset = queryset.filter(published=strtobool(published))\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"published\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by published status\",\n \"schema\": {\n \"type\": \"boolean\",\n },\n }\n ]\n\n\nclass EventRegistrationCancelledFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by event registration cancellation status.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n cancelled = request.query_params.get(\"cancelled\", None)\n\n if cancelled is None:\n return queryset\n\n if strtobool(cancelled):\n return queryset.exclude(date_cancelled=None)\n\n return queryset.filter(date_cancelled=None)\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"cancelled\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by event registration cancellation status\",\n \"schema\": {\n \"type\": \"boolean\",\n },\n }\n ]\n\n\nclass EventRegistrationQueuedFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by event registration by if they're in the queue.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n queued = request.query_params.get(\"queued\", None)\n\n if queued is None:\n return queryset\n\n if strtobool(queued):\n return queryset.exclude(queue_position=None)\n\n return queryset.filter(queue_position=None)\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"queued\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by event registration queue position\",\n \"schema\": {\n \"type\": \"boolean\",\n },\n }\n ]\n", 
"path": "website/events/api/v2/admin/filters.py"}]} | 2,903 | 835 |
gh_patches_debug_30619 | rasdani/github-patches | git_diff | ipython__ipython-8859 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong Tooltip with shift-tab
@andram opened jupyter/notebook#520 at 2015-10-01 10:09:29 UTC
If a notebook cell starts with a multiline string, then shift-tab gets confused and shows the wrong tooltips for following lines. For example consider a cell containing the following 5 lines:
```
"""
Docstring
"""
max()
min()
```
Now position the cursor inside the brackets of max(). The expected behaviour is that the docstring for `max` is shown; the actual behaviour is that the docstring for `min` is shown (see the attached screenshot for the current GitHub version of Jupyter Notebook).

</issue>
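A minimal reproduction sketch of the report above (an illustration, not part of the original issue); it assumes `IPython.utils.tokenutil` is importable and places the cursor just inside the parentheses of `max()`:

```python
# Hypothetical reproduction; the cursor offset is computed by hand so the call
# mirrors the notebook scenario described in the issue.
from IPython.utils.tokenutil import token_at_cursor

cell = '"""\nDocstring\n"""\nmax()\nmin()\n'
cursor_pos = cell.index("max(") + len("max(")   # just inside the parentheses
print(token_at_cursor(cell, cursor_pos))        # should be 'max'; affected versions return 'min'
```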
<code>
[start of IPython/utils/tokenutil.py]
1 """Token-related utilities"""
2
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 from __future__ import absolute_import, print_function
7
8 from collections import namedtuple
9 from io import StringIO
10 from keyword import iskeyword
11
12 from . import tokenize2
13 from .py3compat import cast_unicode_py2
14
15 Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
16
17 def generate_tokens(readline):
18 """wrap generate_tokens to catch EOF errors"""
19 try:
20 for token in tokenize2.generate_tokens(readline):
21 yield token
22 except tokenize2.TokenError:
23 # catch EOF error
24 return
25
26 def line_at_cursor(cell, cursor_pos=0):
27 """Return the line in a cell at a given cursor position
28
29 Used for calling line-based APIs that don't support multi-line input, yet.
30
31 Parameters
32 ----------
33
34 cell: text
35 multiline block of text
36 cursor_pos: integer
37 the cursor position
38
39 Returns
40 -------
41
42 (line, offset): (text, integer)
43 The line with the current cursor, and the character offset of the start of the line.
44 """
45 offset = 0
46 lines = cell.splitlines(True)
47 for line in lines:
48 next_offset = offset + len(line)
49 if next_offset >= cursor_pos:
50 break
51 offset = next_offset
52 else:
53 line = ""
54 return (line, offset)
55
56 def token_at_cursor(cell, cursor_pos=0):
57 """Get the token at a given cursor
58
59 Used for introspection.
60
61 Function calls are prioritized, so the token for the callable will be returned
62 if the cursor is anywhere inside the call.
63
64 Parameters
65 ----------
66
67 cell : unicode
68 A block of Python code
69 cursor_pos : int
70 The location of the cursor in the block where the token should be found
71 """
72 cell = cast_unicode_py2(cell)
73 names = []
74 tokens = []
75 offset = 0
76 call_names = []
77 for tup in generate_tokens(StringIO(cell).readline):
78
79 tok = Token(*tup)
80
81 # token, text, start, end, line = tup
82 start_col = tok.start[1]
83 end_col = tok.end[1]
84 # allow '|foo' to find 'foo' at the beginning of a line
85 boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
86 if offset + start_col >= boundary:
87 # current token starts after the cursor,
88 # don't consume it
89 break
90
91 if tok.token == tokenize2.NAME and not iskeyword(tok.text):
92 if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
93 names[-1] = "%s.%s" % (names[-1], tok.text)
94 else:
95 names.append(tok.text)
96 elif tok.token == tokenize2.OP:
97 if tok.text == '=' and names:
98 # don't inspect the lhs of an assignment
99 names.pop(-1)
100 if tok.text == '(' and names:
101 # if we are inside a function call, inspect the function
102 call_names.append(names[-1])
103 elif tok.text == ')' and call_names:
104 call_names.pop(-1)
105
106 if offset + end_col > cursor_pos:
107 # we found the cursor, stop reading
108 break
109
110 tokens.append(tok)
111 if tok.token in (tokenize2.NEWLINE, tokenize2.NL):
112 offset += len(tok.line)
113
114 if call_names:
115 return call_names[-1]
116 elif names:
117 return names[-1]
118 else:
119 return ''
120
121
122
[end of IPython/utils/tokenutil.py]
</code>
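One way to see where the `offset` bookkeeping in `token_at_cursor` drifts (a standard-library experiment, not IPython-specific): with CPython's `tokenize`, a triple-quoted string spanning three physical lines arrives as a single STRING token, and the single NEWLINE token that follows it reports only the final physical line in its `line` attribute, so `offset += len(tok.line)` undercounts everything before `max()`:

```python
# Standard-library sketch; tokenutil itself uses the tokenize2 backport, but the
# token stream for this input is the same.
import tokenize
from io import StringIO

src = '"""\nDocstring\n"""\nmax()\nmin()\n'
for tok in tokenize.generate_tokens(StringIO(src).readline):
    print(tokenize.tok_name[tok.type], tok.start, tok.end, repr(tok.line))
```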
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/utils/tokenutil.py b/IPython/utils/tokenutil.py
--- a/IPython/utils/tokenutil.py
+++ b/IPython/utils/tokenutil.py
@@ -72,15 +72,24 @@
cell = cast_unicode_py2(cell)
names = []
tokens = []
- offset = 0
call_names = []
+
+ offsets = {1: 0} # lines start at 1
for tup in generate_tokens(StringIO(cell).readline):
tok = Token(*tup)
# token, text, start, end, line = tup
- start_col = tok.start[1]
- end_col = tok.end[1]
+ start_line, start_col = tok.start
+ end_line, end_col = tok.end
+ if end_line + 1 not in offsets:
+ # keep track of offsets for each line
+ lines = tok.line.splitlines(True)
+ for lineno, line in zip(range(start_line + 1, end_line + 2), lines):
+ if lineno not in offsets:
+ offsets[lineno] = offsets[lineno-1] + len(line)
+
+ offset = offsets[start_line]
# allow '|foo' to find 'foo' at the beginning of a line
boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
if offset + start_col >= boundary:
@@ -103,14 +112,12 @@
elif tok.text == ')' and call_names:
call_names.pop(-1)
- if offset + end_col > cursor_pos:
+ tokens.append(tok)
+
+ if offsets[end_line] + end_col > cursor_pos:
# we found the cursor, stop reading
break
- tokens.append(tok)
- if tok.token in (tokenize2.NEWLINE, tokenize2.NL):
- offset += len(tok.line)
-
if call_names:
return call_names[-1]
elif names:
| {"golden_diff": "diff --git a/IPython/utils/tokenutil.py b/IPython/utils/tokenutil.py\n--- a/IPython/utils/tokenutil.py\n+++ b/IPython/utils/tokenutil.py\n@@ -72,15 +72,24 @@\n cell = cast_unicode_py2(cell)\n names = []\n tokens = []\n- offset = 0\n call_names = []\n+ \n+ offsets = {1: 0} # lines start at 1\n for tup in generate_tokens(StringIO(cell).readline):\n \n tok = Token(*tup)\n \n # token, text, start, end, line = tup\n- start_col = tok.start[1]\n- end_col = tok.end[1]\n+ start_line, start_col = tok.start\n+ end_line, end_col = tok.end\n+ if end_line + 1 not in offsets:\n+ # keep track of offsets for each line\n+ lines = tok.line.splitlines(True)\n+ for lineno, line in zip(range(start_line + 1, end_line + 2), lines):\n+ if lineno not in offsets:\n+ offsets[lineno] = offsets[lineno-1] + len(line)\n+ \n+ offset = offsets[start_line]\n # allow '|foo' to find 'foo' at the beginning of a line\n boundary = cursor_pos + 1 if start_col == 0 else cursor_pos\n if offset + start_col >= boundary:\n@@ -103,14 +112,12 @@\n elif tok.text == ')' and call_names:\n call_names.pop(-1)\n \n- if offset + end_col > cursor_pos:\n+ tokens.append(tok)\n+ \n+ if offsets[end_line] + end_col > cursor_pos:\n # we found the cursor, stop reading\n break\n \n- tokens.append(tok)\n- if tok.token in (tokenize2.NEWLINE, tokenize2.NL):\n- offset += len(tok.line)\n- \n if call_names:\n return call_names[-1]\n elif names:\n", "issue": "Wrong Tooltip with shift-tab\n@andram opened jupyter/notebook#520 at 2015-10-01 10:09:29 UTC\n\nIf a notebook cell starts with a multiline string, then shift-tab gets confused and shows the wrong tooltips for following lines. For example consider a cell containing the following 5 lines:\n\n```\n\"\"\"\nDocstring\n\"\"\"\nmax()\nmin()\n```\n\nnow position the cursor inside the brackets of max(). Expected behaviour is that the docstring for `max` is shown. Actual behaviour is that the docstring for min is shown. (see attached screenshot for current github version of jupyter notebook)\n\n\n\n", "before_files": [{"content": "\"\"\"Token-related utilities\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import absolute_import, print_function\n\nfrom collections import namedtuple\nfrom io import StringIO\nfrom keyword import iskeyword\n\nfrom . 
import tokenize2\nfrom .py3compat import cast_unicode_py2\n\nToken = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])\n\ndef generate_tokens(readline):\n \"\"\"wrap generate_tokens to catch EOF errors\"\"\"\n try:\n for token in tokenize2.generate_tokens(readline):\n yield token\n except tokenize2.TokenError:\n # catch EOF error\n return\n\ndef line_at_cursor(cell, cursor_pos=0):\n \"\"\"Return the line in a cell at a given cursor position\n \n Used for calling line-based APIs that don't support multi-line input, yet.\n \n Parameters\n ----------\n \n cell: text\n multiline block of text\n cursor_pos: integer\n the cursor position\n \n Returns\n -------\n \n (line, offset): (text, integer)\n The line with the current cursor, and the character offset of the start of the line.\n \"\"\"\n offset = 0\n lines = cell.splitlines(True)\n for line in lines:\n next_offset = offset + len(line)\n if next_offset >= cursor_pos:\n break\n offset = next_offset\n else:\n line = \"\"\n return (line, offset)\n\ndef token_at_cursor(cell, cursor_pos=0):\n \"\"\"Get the token at a given cursor\n \n Used for introspection.\n \n Function calls are prioritized, so the token for the callable will be returned\n if the cursor is anywhere inside the call.\n \n Parameters\n ----------\n \n cell : unicode\n A block of Python code\n cursor_pos : int\n The location of the cursor in the block where the token should be found\n \"\"\"\n cell = cast_unicode_py2(cell)\n names = []\n tokens = []\n offset = 0\n call_names = []\n for tup in generate_tokens(StringIO(cell).readline):\n \n tok = Token(*tup)\n \n # token, text, start, end, line = tup\n start_col = tok.start[1]\n end_col = tok.end[1]\n # allow '|foo' to find 'foo' at the beginning of a line\n boundary = cursor_pos + 1 if start_col == 0 else cursor_pos\n if offset + start_col >= boundary:\n # current token starts after the cursor,\n # don't consume it\n break\n \n if tok.token == tokenize2.NAME and not iskeyword(tok.text):\n if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':\n names[-1] = \"%s.%s\" % (names[-1], tok.text)\n else:\n names.append(tok.text)\n elif tok.token == tokenize2.OP:\n if tok.text == '=' and names:\n # don't inspect the lhs of an assignment\n names.pop(-1)\n if tok.text == '(' and names:\n # if we are inside a function call, inspect the function\n call_names.append(names[-1])\n elif tok.text == ')' and call_names:\n call_names.pop(-1)\n \n if offset + end_col > cursor_pos:\n # we found the cursor, stop reading\n break\n \n tokens.append(tok)\n if tok.token in (tokenize2.NEWLINE, tokenize2.NL):\n offset += len(tok.line)\n \n if call_names:\n return call_names[-1]\n elif names:\n return names[-1]\n else:\n return ''\n \n\n", "path": "IPython/utils/tokenutil.py"}]} | 1,803 | 444 |
gh_patches_debug_21233 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-4048 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
(react list) proposal-list-item should show created/modified date
**URL:** proposal-list
**user:** any
**expected behaviour:** list items (tiles) in react list should show `created on/modified on` in front of date (see django list)
**behaviour:** list items do not indicate created on or modified on, but shows only the date
**important screensize:** any
**device & browser:** any
**Comment/Question:**
Screenshot?
Note: This will only affect where this particular react list is used (currently only used for budgeting/proposal-list)
</issue>
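For context, a minimal Django REST Framework sketch of the pattern involved (a generic illustration under assumptions, not the project's code): both timestamps must be exposed by the serializer before the React tiles can label them as created on/modified on.

```python
# Sketch only; assumes Proposal carries auto-maintained `created` and
# `modified` datetime fields, as the golden diff below implies it does.
from rest_framework import serializers

from meinberlin.apps.budgeting.models import Proposal


class ProposalTileSerializer(serializers.ModelSerializer):
    class Meta:
        model = Proposal
        fields = ("pk", "name", "created", "modified")
        read_only_fields = fields
```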
<code>
[start of meinberlin/apps/budgeting/serializers.py]
1 from django.utils import translation
2 from rest_framework import serializers
3
4 from adhocracy4.categories.models import Category
5
6 from .models import Proposal
7
8
9 class CategoryField(serializers.Field):
10
11 def to_internal_value(self, category):
12 if category:
13 return Category.objects.get(pk=category)
14 else:
15 return None
16
17 def to_representation(self, category):
18 return {'id': category.pk, 'name': category.name}
19
20
21 class ProposalSerializer(serializers.ModelSerializer):
22
23 creator = serializers.SerializerMethodField()
24 comment_count = serializers.SerializerMethodField()
25 positive_rating_count = serializers.SerializerMethodField()
26 negative_rating_count = serializers.SerializerMethodField()
27 category = CategoryField()
28 url = serializers.SerializerMethodField()
29 locale = serializers.SerializerMethodField()
30
31 class Meta:
32 model = Proposal
33 fields = ('budget', 'category', 'comment_count', 'created', 'creator',
34 'is_archived', 'name', 'negative_rating_count',
35 'positive_rating_count', 'url', 'pk', 'moderator_feedback',
36 'moderator_feedback_choices', 'locale')
37 read_only_fields = ('budget', 'category', 'comment_count', 'created',
38 'creator', 'is_archived', 'name',
39 'negative_rating_count', 'positive_rating_count',
40 'url', 'pk', 'moderator_feedback',
41 'moderator_feedback_choices', 'locale')
42
43 def get_creator(self, proposal):
44 return proposal.creator.username
45
46 def get_comment_count(self, proposal):
47 if hasattr(proposal, 'comment_count'):
48 return proposal.comment_count
49 else:
50 return 0
51
52 def get_positive_rating_count(self, proposal):
53 if hasattr(proposal, 'positive_rating_count'):
54 return proposal.positive_rating_count
55 else:
56 return 0
57
58 def get_negative_rating_count(self, proposal):
59 if hasattr(proposal, 'negative_rating_count'):
60 return proposal.negative_rating_count
61 else:
62 return 0
63
64 def get_url(self, proposal):
65 return proposal.get_absolute_url()
66
67 def get_moderator_feedback(self, proposal):
68 if hasattr(proposal, 'moderator_feedback'):
69 return proposal.moderator_feedback
70 else:
71 return None
72
73 def get_moderator_feedback_choices(self, proposal):
74 if hasattr(proposal, 'moderator_feedback_choices'):
75 return proposal.moderator_feedback_choices
76 else:
77 return None
78
79 def get_locale(self, proposal):
80 return translation.get_language()
81
[end of meinberlin/apps/budgeting/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/budgeting/serializers.py b/meinberlin/apps/budgeting/serializers.py
--- a/meinberlin/apps/budgeting/serializers.py
+++ b/meinberlin/apps/budgeting/serializers.py
@@ -30,12 +30,12 @@
class Meta:
model = Proposal
- fields = ('budget', 'category', 'comment_count', 'created', 'creator',
- 'is_archived', 'name', 'negative_rating_count',
+ fields = ('budget', 'category', 'comment_count', 'created', 'modified',
+ 'creator', 'is_archived', 'name', 'negative_rating_count',
'positive_rating_count', 'url', 'pk', 'moderator_feedback',
'moderator_feedback_choices', 'locale')
read_only_fields = ('budget', 'category', 'comment_count', 'created',
- 'creator', 'is_archived', 'name',
+ 'modified', 'creator', 'is_archived', 'name',
'negative_rating_count', 'positive_rating_count',
'url', 'pk', 'moderator_feedback',
'moderator_feedback_choices', 'locale')
| {"golden_diff": "diff --git a/meinberlin/apps/budgeting/serializers.py b/meinberlin/apps/budgeting/serializers.py\n--- a/meinberlin/apps/budgeting/serializers.py\n+++ b/meinberlin/apps/budgeting/serializers.py\n@@ -30,12 +30,12 @@\n \n class Meta:\n model = Proposal\n- fields = ('budget', 'category', 'comment_count', 'created', 'creator',\n- 'is_archived', 'name', 'negative_rating_count',\n+ fields = ('budget', 'category', 'comment_count', 'created', 'modified',\n+ 'creator', 'is_archived', 'name', 'negative_rating_count',\n 'positive_rating_count', 'url', 'pk', 'moderator_feedback',\n 'moderator_feedback_choices', 'locale')\n read_only_fields = ('budget', 'category', 'comment_count', 'created',\n- 'creator', 'is_archived', 'name',\n+ 'modified', 'creator', 'is_archived', 'name',\n 'negative_rating_count', 'positive_rating_count',\n 'url', 'pk', 'moderator_feedback',\n 'moderator_feedback_choices', 'locale')\n", "issue": "(react list) proposal-list-item should show created/modified date\n**URL:** proposal-list\r\n**user:** any\r\n**expected behaviour:** list items (tiles) in react list should show `created on/modified on` in front of date (see django list)\r\n**behaviour:** list items do not indicate created on or modified on, but shows only the date\r\n**important screensize:** any\r\n**device & browser:** any\r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\r\nNote: This will only affect where this particular react list is used (currently only used for budgeting/proposal-list)\n", "before_files": [{"content": "from django.utils import translation\nfrom rest_framework import serializers\n\nfrom adhocracy4.categories.models import Category\n\nfrom .models import Proposal\n\n\nclass CategoryField(serializers.Field):\n\n def to_internal_value(self, category):\n if category:\n return Category.objects.get(pk=category)\n else:\n return None\n\n def to_representation(self, category):\n return {'id': category.pk, 'name': category.name}\n\n\nclass ProposalSerializer(serializers.ModelSerializer):\n\n creator = serializers.SerializerMethodField()\n comment_count = serializers.SerializerMethodField()\n positive_rating_count = serializers.SerializerMethodField()\n negative_rating_count = serializers.SerializerMethodField()\n category = CategoryField()\n url = serializers.SerializerMethodField()\n locale = serializers.SerializerMethodField()\n\n class Meta:\n model = Proposal\n fields = ('budget', 'category', 'comment_count', 'created', 'creator',\n 'is_archived', 'name', 'negative_rating_count',\n 'positive_rating_count', 'url', 'pk', 'moderator_feedback',\n 'moderator_feedback_choices', 'locale')\n read_only_fields = ('budget', 'category', 'comment_count', 'created',\n 'creator', 'is_archived', 'name',\n 'negative_rating_count', 'positive_rating_count',\n 'url', 'pk', 'moderator_feedback',\n 'moderator_feedback_choices', 'locale')\n\n def get_creator(self, proposal):\n return proposal.creator.username\n\n def get_comment_count(self, proposal):\n if hasattr(proposal, 'comment_count'):\n return proposal.comment_count\n else:\n return 0\n\n def get_positive_rating_count(self, proposal):\n if hasattr(proposal, 'positive_rating_count'):\n return proposal.positive_rating_count\n else:\n return 0\n\n def get_negative_rating_count(self, proposal):\n if hasattr(proposal, 'negative_rating_count'):\n return proposal.negative_rating_count\n else:\n return 0\n\n def get_url(self, proposal):\n return proposal.get_absolute_url()\n\n def get_moderator_feedback(self, proposal):\n if hasattr(proposal, 
'moderator_feedback'):\n return proposal.moderator_feedback\n else:\n return None\n\n def get_moderator_feedback_choices(self, proposal):\n if hasattr(proposal, 'moderator_feedback_choices'):\n return proposal.moderator_feedback_choices\n else:\n return None\n\n def get_locale(self, proposal):\n return translation.get_language()\n", "path": "meinberlin/apps/budgeting/serializers.py"}]} | 1,350 | 264 |
gh_patches_debug_5471 | rasdani/github-patches | git_diff | archlinux__archinstall-565 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exit code 256 in networking.py
While doing PR #562 I noticed that we check if the exit code is 256. I thought that for any process it was an 8-bit value and could be 0 through 255, so I'm not sure about this logic. I'd like to figure out why it was written in this manner, and it probably should be fixed. Maybe drop the exit code check entirely?
</issue>
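One plausible origin of the 256 (an assumption about the check's history, not something confirmed by the code below): on POSIX a raw wait status packs the exit code into its high byte, so a child that exits with code 1 shows up as 256 until the status is decoded.

```python
import os

status = os.system("false")      # os.system returns a raw wait status on POSIX
print(status)                    # typically 256
print(os.WEXITSTATUS(status))    # 1 -- the actual 8-bit exit code
```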
<code>
[start of archinstall/lib/networking.py]
1 import fcntl
2 import logging
3 import os
4 import socket
5 import struct
6 from collections import OrderedDict
7
8 from .exceptions import *
9 from .general import SysCommand
10 from .output import log
11 from .storage import storage
12
13
14 def get_hw_addr(ifname):
15 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
16 info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', bytes(ifname, 'utf-8')[:15]))
17 return ':'.join('%02x' % b for b in info[18:24])
18
19
20 def list_interfaces(skip_loopback=True):
21 interfaces = OrderedDict()
22 for index, iface in socket.if_nameindex():
23 if skip_loopback and iface == "lo":
24 continue
25
26 mac = get_hw_addr(iface).replace(':', '-').lower()
27 interfaces[mac] = iface
28 return interfaces
29
30
31 def check_mirror_reachable():
32 if (exit_code := SysCommand("pacman -Sy").exit_code) == 0:
33 return True
34 elif exit_code == 256:
35 if os.geteuid() != 0:
36 log("check_mirror_reachable() uses 'pacman -Sy' which requires root.", level=logging.ERROR, fg="red")
37
38 return False
39
40
41 def enrich_iface_types(interfaces: dict):
42 result = {}
43 for iface in interfaces:
44 if os.path.isdir(f"/sys/class/net/{iface}/bridge/"):
45 result[iface] = 'BRIDGE'
46 elif os.path.isfile(f"/sys/class/net/{iface}/tun_flags"):
47 # ethtool -i {iface}
48 result[iface] = 'TUN/TAP'
49 elif os.path.isdir(f"/sys/class/net/{iface}/device"):
50 if os.path.isdir(f"/sys/class/net/{iface}/wireless/"):
51 result[iface] = 'WIRELESS'
52 else:
53 result[iface] = 'PHYSICAL'
54 else:
55 result[iface] = 'UNKNOWN'
56 return result
57
58
59 def get_interface_from_mac(mac):
60 return list_interfaces().get(mac.lower(), None)
61
62
63 def wireless_scan(interface):
64 interfaces = enrich_iface_types(list_interfaces().values())
65 if interfaces[interface] != 'WIRELESS':
66 raise HardwareIncompatibilityError(f"Interface {interface} is not a wireless interface: {interfaces}")
67
68 SysCommand(f"iwctl station {interface} scan")
69
70 if '_WIFI' not in storage:
71 storage['_WIFI'] = {}
72 if interface not in storage['_WIFI']:
73 storage['_WIFI'][interface] = {}
74
75 storage['_WIFI'][interface]['scanning'] = True
76
77
78 # TODO: Full WiFi experience might get evolved in the future, pausing for now 2021-01-25
79 def get_wireless_networks(interface):
80 # TODO: Make this oneliner pritter to check if the interface is scanning or not.
81 if '_WIFI' not in storage or interface not in storage['_WIFI'] or storage['_WIFI'][interface].get('scanning', False) is False:
82 import time
83
84 wireless_scan(interface)
85 time.sleep(5)
86
87 for line in SysCommand(f"iwctl station {interface} get-networks"):
88 print(line)
89
[end of archinstall/lib/networking.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/archinstall/lib/networking.py b/archinstall/lib/networking.py
--- a/archinstall/lib/networking.py
+++ b/archinstall/lib/networking.py
@@ -31,9 +31,8 @@
def check_mirror_reachable():
if (exit_code := SysCommand("pacman -Sy").exit_code) == 0:
return True
- elif exit_code == 256:
- if os.geteuid() != 0:
- log("check_mirror_reachable() uses 'pacman -Sy' which requires root.", level=logging.ERROR, fg="red")
+ elif os.geteuid() != 0:
+ log("check_mirror_reachable() uses 'pacman -Sy' which requires root.", level=logging.ERROR, fg="red")
return False
| {"golden_diff": "diff --git a/archinstall/lib/networking.py b/archinstall/lib/networking.py\n--- a/archinstall/lib/networking.py\n+++ b/archinstall/lib/networking.py\n@@ -31,9 +31,8 @@\n def check_mirror_reachable():\n \tif (exit_code := SysCommand(\"pacman -Sy\").exit_code) == 0:\n \t\treturn True\n-\telif exit_code == 256:\n-\t\tif os.geteuid() != 0:\n-\t\t\tlog(\"check_mirror_reachable() uses 'pacman -Sy' which requires root.\", level=logging.ERROR, fg=\"red\")\n+\telif os.geteuid() != 0:\n+\t\tlog(\"check_mirror_reachable() uses 'pacman -Sy' which requires root.\", level=logging.ERROR, fg=\"red\")\n \n \treturn False\n", "issue": "Exit code 256 in networking.py\nWhile doing PR #562 I noticed that we check if exit code is 256. I thought for any process it was an 8 bit value and could be 0 through 255, so I'm not sure about this logic. I'd like to figure out why it was written in this manner and it probably should be fixed. Maybe drop the exit code check entirely? \n", "before_files": [{"content": "import fcntl\nimport logging\nimport os\nimport socket\nimport struct\nfrom collections import OrderedDict\n\nfrom .exceptions import *\nfrom .general import SysCommand\nfrom .output import log\nfrom .storage import storage\n\n\ndef get_hw_addr(ifname):\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tinfo = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', bytes(ifname, 'utf-8')[:15]))\n\treturn ':'.join('%02x' % b for b in info[18:24])\n\n\ndef list_interfaces(skip_loopback=True):\n\tinterfaces = OrderedDict()\n\tfor index, iface in socket.if_nameindex():\n\t\tif skip_loopback and iface == \"lo\":\n\t\t\tcontinue\n\n\t\tmac = get_hw_addr(iface).replace(':', '-').lower()\n\t\tinterfaces[mac] = iface\n\treturn interfaces\n\n\ndef check_mirror_reachable():\n\tif (exit_code := SysCommand(\"pacman -Sy\").exit_code) == 0:\n\t\treturn True\n\telif exit_code == 256:\n\t\tif os.geteuid() != 0:\n\t\t\tlog(\"check_mirror_reachable() uses 'pacman -Sy' which requires root.\", level=logging.ERROR, fg=\"red\")\n\n\treturn False\n\n\ndef enrich_iface_types(interfaces: dict):\n\tresult = {}\n\tfor iface in interfaces:\n\t\tif os.path.isdir(f\"/sys/class/net/{iface}/bridge/\"):\n\t\t\tresult[iface] = 'BRIDGE'\n\t\telif os.path.isfile(f\"/sys/class/net/{iface}/tun_flags\"):\n\t\t\t# ethtool -i {iface}\n\t\t\tresult[iface] = 'TUN/TAP'\n\t\telif os.path.isdir(f\"/sys/class/net/{iface}/device\"):\n\t\t\tif os.path.isdir(f\"/sys/class/net/{iface}/wireless/\"):\n\t\t\t\tresult[iface] = 'WIRELESS'\n\t\t\telse:\n\t\t\t\tresult[iface] = 'PHYSICAL'\n\t\telse:\n\t\t\tresult[iface] = 'UNKNOWN'\n\treturn result\n\n\ndef get_interface_from_mac(mac):\n\treturn list_interfaces().get(mac.lower(), None)\n\n\ndef wireless_scan(interface):\n\tinterfaces = enrich_iface_types(list_interfaces().values())\n\tif interfaces[interface] != 'WIRELESS':\n\t\traise HardwareIncompatibilityError(f\"Interface {interface} is not a wireless interface: {interfaces}\")\n\n\tSysCommand(f\"iwctl station {interface} scan\")\n\n\tif '_WIFI' not in storage:\n\t\tstorage['_WIFI'] = {}\n\tif interface not in storage['_WIFI']:\n\t\tstorage['_WIFI'][interface] = {}\n\n\tstorage['_WIFI'][interface]['scanning'] = True\n\n\n# TODO: Full WiFi experience might get evolved in the future, pausing for now 2021-01-25\ndef get_wireless_networks(interface):\n\t# TODO: Make this oneliner pritter to check if the interface is scanning or not.\n\tif '_WIFI' not in storage or interface not in storage['_WIFI'] or storage['_WIFI'][interface].get('scanning', 
False) is False:\n\t\timport time\n\n\t\twireless_scan(interface)\n\t\ttime.sleep(5)\n\n\tfor line in SysCommand(f\"iwctl station {interface} get-networks\"):\n\t\tprint(line)\n", "path": "archinstall/lib/networking.py"}]} | 1,529 | 173 |
gh_patches_debug_21199 | rasdani/github-patches | git_diff | tobymao__sqlglot-2160 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Teradata to Databricks transpilation issue
When working on a Teradata to Databricks conversion I ran into an issue where a command like "Database databasename", which is equivalent to "USE Databasename" on the Databricks side, is not getting transpiled correctly. The final output still shows the original command.
**Sample code** below
```
sql="""
DATABASE tduser;
"""
try:
new_query = sqlglot.transpile(sql, read='teradata', write='databricks', pretty=True)[0]
print(new_query)
except sqlglot.errors.ParseError as error:
print(traceback.format_exc())
print(error.errors)
```
**Expected output:**
USE tduser
**Official Documentation**
https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-usedb.html
</issue>
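A sketch of the check the reporter is after, reusing `sqlglot.transpile` exactly as in the snippet above; the comment shows the desired output, not what the affected version prints:

```python
import sqlglot

out = sqlglot.transpile("DATABASE tduser", read="teradata", write="databricks")[0]
print(out)  # desired: USE tduser
```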
<code>
[start of sqlglot/dialects/teradata.py]
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, generator, parser, tokens, transforms
6 from sqlglot.dialects.dialect import Dialect, max_or_greatest, min_or_least
7 from sqlglot.tokens import TokenType
8
9
10 class Teradata(Dialect):
11 TIME_MAPPING = {
12 "Y": "%Y",
13 "YYYY": "%Y",
14 "YY": "%y",
15 "MMMM": "%B",
16 "MMM": "%b",
17 "DD": "%d",
18 "D": "%-d",
19 "HH": "%H",
20 "H": "%-H",
21 "MM": "%M",
22 "M": "%-M",
23 "SS": "%S",
24 "S": "%-S",
25 "SSSSSS": "%f",
26 "E": "%a",
27 "EE": "%a",
28 "EEE": "%a",
29 "EEEE": "%A",
30 }
31
32 class Tokenizer(tokens.Tokenizer):
33 # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
34 KEYWORDS = {
35 **tokens.Tokenizer.KEYWORDS,
36 "^=": TokenType.NEQ,
37 "BYTEINT": TokenType.SMALLINT,
38 "COLLECT": TokenType.COMMAND,
39 "GE": TokenType.GTE,
40 "GT": TokenType.GT,
41 "HELP": TokenType.COMMAND,
42 "INS": TokenType.INSERT,
43 "LE": TokenType.LTE,
44 "LT": TokenType.LT,
45 "MOD": TokenType.MOD,
46 "NE": TokenType.NEQ,
47 "NOT=": TokenType.NEQ,
48 "SEL": TokenType.SELECT,
49 "ST_GEOMETRY": TokenType.GEOMETRY,
50 "TOP": TokenType.TOP,
51 }
52
53 # Teradata does not support % as a modulo operator
54 SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
55 SINGLE_TOKENS.pop("%")
56
57 class Parser(parser.Parser):
58 CHARSET_TRANSLATORS = {
59 "GRAPHIC_TO_KANJISJIS",
60 "GRAPHIC_TO_LATIN",
61 "GRAPHIC_TO_UNICODE",
62 "GRAPHIC_TO_UNICODE_PadSpace",
63 "KANJI1_KanjiEBCDIC_TO_UNICODE",
64 "KANJI1_KanjiEUC_TO_UNICODE",
65 "KANJI1_KANJISJIS_TO_UNICODE",
66 "KANJI1_SBC_TO_UNICODE",
67 "KANJISJIS_TO_GRAPHIC",
68 "KANJISJIS_TO_LATIN",
69 "KANJISJIS_TO_UNICODE",
70 "LATIN_TO_GRAPHIC",
71 "LATIN_TO_KANJISJIS",
72 "LATIN_TO_UNICODE",
73 "LOCALE_TO_UNICODE",
74 "UNICODE_TO_GRAPHIC",
75 "UNICODE_TO_GRAPHIC_PadGraphic",
76 "UNICODE_TO_GRAPHIC_VarGraphic",
77 "UNICODE_TO_KANJI1_KanjiEBCDIC",
78 "UNICODE_TO_KANJI1_KanjiEUC",
79 "UNICODE_TO_KANJI1_KANJISJIS",
80 "UNICODE_TO_KANJI1_SBC",
81 "UNICODE_TO_KANJISJIS",
82 "UNICODE_TO_LATIN",
83 "UNICODE_TO_LOCALE",
84 "UNICODE_TO_UNICODE_FoldSpace",
85 "UNICODE_TO_UNICODE_Fullwidth",
86 "UNICODE_TO_UNICODE_Halfwidth",
87 "UNICODE_TO_UNICODE_NFC",
88 "UNICODE_TO_UNICODE_NFD",
89 "UNICODE_TO_UNICODE_NFKC",
90 "UNICODE_TO_UNICODE_NFKD",
91 }
92
93 FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
94 FUNC_TOKENS.remove(TokenType.REPLACE)
95
96 STATEMENT_PARSERS = {
97 **parser.Parser.STATEMENT_PARSERS,
98 TokenType.REPLACE: lambda self: self._parse_create(),
99 }
100
101 FUNCTION_PARSERS = {
102 **parser.Parser.FUNCTION_PARSERS,
103 "RANGE_N": lambda self: self._parse_rangen(),
104 "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
105 }
106
107 def _parse_translate(self, strict: bool) -> exp.Expression:
108 this = self._parse_conjunction()
109
110 if not self._match(TokenType.USING):
111 self.raise_error("Expected USING in TRANSLATE")
112
113 if self._match_texts(self.CHARSET_TRANSLATORS):
114 charset_split = self._prev.text.split("_TO_")
115 to = self.expression(exp.CharacterSet, this=charset_split[1])
116 else:
117 self.raise_error("Expected a character set translator after USING in TRANSLATE")
118
119 return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
120
121 # FROM before SET in Teradata UPDATE syntax
122 # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
123 def _parse_update(self) -> exp.Update:
124 return self.expression(
125 exp.Update,
126 **{ # type: ignore
127 "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
128 "from": self._parse_from(joins=True),
129 "expressions": self._match(TokenType.SET)
130 and self._parse_csv(self._parse_equality),
131 "where": self._parse_where(),
132 },
133 )
134
135 def _parse_rangen(self):
136 this = self._parse_id_var()
137 self._match(TokenType.BETWEEN)
138
139 expressions = self._parse_csv(self._parse_conjunction)
140 each = self._match_text_seq("EACH") and self._parse_conjunction()
141
142 return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)
143
144 class Generator(generator.Generator):
145 JOIN_HINTS = False
146 TABLE_HINTS = False
147 QUERY_HINTS = False
148
149 TYPE_MAPPING = {
150 **generator.Generator.TYPE_MAPPING,
151 exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
152 }
153
154 PROPERTIES_LOCATION = {
155 **generator.Generator.PROPERTIES_LOCATION,
156 exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
157 exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
158 exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
159 }
160
161 TRANSFORMS = {
162 **generator.Generator.TRANSFORMS,
163 exp.Max: max_or_greatest,
164 exp.Min: min_or_least,
165 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
166 exp.StrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
167 exp.ToChar: lambda self, e: self.function_fallback_sql(e),
168 }
169
170 def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
171 return f"PARTITION BY {self.sql(expression, 'this')}"
172
173 # FROM before SET in Teradata UPDATE syntax
174 # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
175 def update_sql(self, expression: exp.Update) -> str:
176 this = self.sql(expression, "this")
177 from_sql = self.sql(expression, "from")
178 set_sql = self.expressions(expression, flat=True)
179 where_sql = self.sql(expression, "where")
180 sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
181 return self.prepend_ctes(expression, sql)
182
183 def mod_sql(self, expression: exp.Mod) -> str:
184 return self.binary(expression, "MOD")
185
186 def datatype_sql(self, expression: exp.DataType) -> str:
187 type_sql = super().datatype_sql(expression)
188 prefix_sql = expression.args.get("prefix")
189 return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
190
191 def rangen_sql(self, expression: exp.RangeN) -> str:
192 this = self.sql(expression, "this")
193 expressions_sql = self.expressions(expression)
194 each_sql = self.sql(expression, "each")
195 each_sql = f" EACH {each_sql}" if each_sql else ""
196
197 return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
198
199 def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
200 kind = self.sql(expression, "kind").upper()
201 if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
202 this_name = self.sql(expression.this, "this")
203 this_properties = self.properties(
204 exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
205 wrapped=False,
206 prefix=",",
207 )
208 this_schema = self.schema_columns_sql(expression.this)
209 return f"{this_name}{this_properties}{self.sep()}{this_schema}"
210
211 return super().createable_sql(expression, locations)
212
[end of sqlglot/dialects/teradata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sqlglot/dialects/teradata.py b/sqlglot/dialects/teradata.py
--- a/sqlglot/dialects/teradata.py
+++ b/sqlglot/dialects/teradata.py
@@ -95,6 +95,9 @@
STATEMENT_PARSERS = {
**parser.Parser.STATEMENT_PARSERS,
+ TokenType.DATABASE: lambda self: self.expression(
+ exp.Use, this=self._parse_table(schema=False)
+ ),
TokenType.REPLACE: lambda self: self._parse_create(),
}
@@ -165,6 +168,7 @@
exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
exp.StrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
exp.ToChar: lambda self, e: self.function_fallback_sql(e),
+ exp.Use: lambda self, e: f"DATABASE {self.sql(e, 'this')}",
}
def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
| {"golden_diff": "diff --git a/sqlglot/dialects/teradata.py b/sqlglot/dialects/teradata.py\n--- a/sqlglot/dialects/teradata.py\n+++ b/sqlglot/dialects/teradata.py\n@@ -95,6 +95,9 @@\n \n STATEMENT_PARSERS = {\n **parser.Parser.STATEMENT_PARSERS,\n+ TokenType.DATABASE: lambda self: self.expression(\n+ exp.Use, this=self._parse_table(schema=False)\n+ ),\n TokenType.REPLACE: lambda self: self._parse_create(),\n }\n \n@@ -165,6 +168,7 @@\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.StrToDate: lambda self, e: f\"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n+ exp.Use: lambda self, e: f\"DATABASE {self.sql(e, 'this')}\",\n }\n \n def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:\n", "issue": "Teradata to Databricks transpilation issue\nWhen working on Teradata to Databricks conversion i ran into a issue where a command like \"Database databasename\" which is equvalent to \"USE Databasename\" on Databricks side is not getting tranpiled correctly. The final output still shows the original command.\r\n\r\n\r\n**Sample code** below\r\n```\r\nsql=\"\"\"\r\n\tDATABASE tduser;\r\n\"\"\"\r\n\r\ntry:\r\n new_query = sqlglot.transpile(sql, read='teradata', write='databricks', pretty=True)[0]\r\n print(new_query)\r\nexcept sqlglot.errors.ParseError as error:\r\n print(traceback.format_exc())\r\n print(error.errors)\r\n```\r\n**Expected output:**\r\nUSE tduser\r\n\r\n**Official Documentation**\r\nhttps://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-usedb.html\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, max_or_greatest, min_or_least\nfrom sqlglot.tokens import TokenType\n\n\nclass Teradata(Dialect):\n TIME_MAPPING = {\n \"Y\": \"%Y\",\n \"YYYY\": \"%Y\",\n \"YY\": \"%y\",\n \"MMMM\": \"%B\",\n \"MMM\": \"%b\",\n \"DD\": \"%d\",\n \"D\": \"%-d\",\n \"HH\": \"%H\",\n \"H\": \"%-H\",\n \"MM\": \"%M\",\n \"M\": \"%-M\",\n \"SS\": \"%S\",\n \"S\": \"%-S\",\n \"SSSSSS\": \"%f\",\n \"E\": \"%a\",\n \"EE\": \"%a\",\n \"EEE\": \"%a\",\n \"EEEE\": \"%A\",\n }\n\n class Tokenizer(tokens.Tokenizer):\n # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"^=\": TokenType.NEQ,\n \"BYTEINT\": TokenType.SMALLINT,\n \"COLLECT\": TokenType.COMMAND,\n \"GE\": TokenType.GTE,\n \"GT\": TokenType.GT,\n \"HELP\": TokenType.COMMAND,\n \"INS\": TokenType.INSERT,\n \"LE\": TokenType.LTE,\n \"LT\": TokenType.LT,\n \"MOD\": TokenType.MOD,\n \"NE\": TokenType.NEQ,\n \"NOT=\": TokenType.NEQ,\n \"SEL\": TokenType.SELECT,\n \"ST_GEOMETRY\": TokenType.GEOMETRY,\n \"TOP\": TokenType.TOP,\n }\n\n # Teradata does not support % as a modulo operator\n SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}\n SINGLE_TOKENS.pop(\"%\")\n\n class Parser(parser.Parser):\n CHARSET_TRANSLATORS = {\n \"GRAPHIC_TO_KANJISJIS\",\n \"GRAPHIC_TO_LATIN\",\n \"GRAPHIC_TO_UNICODE\",\n \"GRAPHIC_TO_UNICODE_PadSpace\",\n \"KANJI1_KanjiEBCDIC_TO_UNICODE\",\n \"KANJI1_KanjiEUC_TO_UNICODE\",\n \"KANJI1_KANJISJIS_TO_UNICODE\",\n \"KANJI1_SBC_TO_UNICODE\",\n \"KANJISJIS_TO_GRAPHIC\",\n \"KANJISJIS_TO_LATIN\",\n \"KANJISJIS_TO_UNICODE\",\n \"LATIN_TO_GRAPHIC\",\n 
\"LATIN_TO_KANJISJIS\",\n \"LATIN_TO_UNICODE\",\n \"LOCALE_TO_UNICODE\",\n \"UNICODE_TO_GRAPHIC\",\n \"UNICODE_TO_GRAPHIC_PadGraphic\",\n \"UNICODE_TO_GRAPHIC_VarGraphic\",\n \"UNICODE_TO_KANJI1_KanjiEBCDIC\",\n \"UNICODE_TO_KANJI1_KanjiEUC\",\n \"UNICODE_TO_KANJI1_KANJISJIS\",\n \"UNICODE_TO_KANJI1_SBC\",\n \"UNICODE_TO_KANJISJIS\",\n \"UNICODE_TO_LATIN\",\n \"UNICODE_TO_LOCALE\",\n \"UNICODE_TO_UNICODE_FoldSpace\",\n \"UNICODE_TO_UNICODE_Fullwidth\",\n \"UNICODE_TO_UNICODE_Halfwidth\",\n \"UNICODE_TO_UNICODE_NFC\",\n \"UNICODE_TO_UNICODE_NFD\",\n \"UNICODE_TO_UNICODE_NFKC\",\n \"UNICODE_TO_UNICODE_NFKD\",\n }\n\n FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}\n FUNC_TOKENS.remove(TokenType.REPLACE)\n\n STATEMENT_PARSERS = {\n **parser.Parser.STATEMENT_PARSERS,\n TokenType.REPLACE: lambda self: self._parse_create(),\n }\n\n FUNCTION_PARSERS = {\n **parser.Parser.FUNCTION_PARSERS,\n \"RANGE_N\": lambda self: self._parse_rangen(),\n \"TRANSLATE\": lambda self: self._parse_translate(self.STRICT_CAST),\n }\n\n def _parse_translate(self, strict: bool) -> exp.Expression:\n this = self._parse_conjunction()\n\n if not self._match(TokenType.USING):\n self.raise_error(\"Expected USING in TRANSLATE\")\n\n if self._match_texts(self.CHARSET_TRANSLATORS):\n charset_split = self._prev.text.split(\"_TO_\")\n to = self.expression(exp.CharacterSet, this=charset_split[1])\n else:\n self.raise_error(\"Expected a character set translator after USING in TRANSLATE\")\n\n return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)\n\n # FROM before SET in Teradata UPDATE syntax\n # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause\n def _parse_update(self) -> exp.Update:\n return self.expression(\n exp.Update,\n **{ # type: ignore\n \"this\": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),\n \"from\": self._parse_from(joins=True),\n \"expressions\": self._match(TokenType.SET)\n and self._parse_csv(self._parse_equality),\n \"where\": self._parse_where(),\n },\n )\n\n def _parse_rangen(self):\n this = self._parse_id_var()\n self._match(TokenType.BETWEEN)\n\n expressions = self._parse_csv(self._parse_conjunction)\n each = self._match_text_seq(\"EACH\") and self._parse_conjunction()\n\n return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)\n\n class Generator(generator.Generator):\n JOIN_HINTS = False\n TABLE_HINTS = False\n QUERY_HINTS = False\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.GEOMETRY: \"ST_GEOMETRY\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,\n exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,\n exp.StabilityProperty: exp.Properties.Location.POST_CREATE,\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.Max: max_or_greatest,\n exp.Min: min_or_least,\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.StrToDate: lambda self, e: f\"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n }\n\n def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:\n return f\"PARTITION BY {self.sql(expression, 'this')}\"\n\n # FROM before SET in Teradata UPDATE syntax\n # 
https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause\n def update_sql(self, expression: exp.Update) -> str:\n this = self.sql(expression, \"this\")\n from_sql = self.sql(expression, \"from\")\n set_sql = self.expressions(expression, flat=True)\n where_sql = self.sql(expression, \"where\")\n sql = f\"UPDATE {this}{from_sql} SET {set_sql}{where_sql}\"\n return self.prepend_ctes(expression, sql)\n\n def mod_sql(self, expression: exp.Mod) -> str:\n return self.binary(expression, \"MOD\")\n\n def datatype_sql(self, expression: exp.DataType) -> str:\n type_sql = super().datatype_sql(expression)\n prefix_sql = expression.args.get(\"prefix\")\n return f\"SYSUDTLIB.{type_sql}\" if prefix_sql else type_sql\n\n def rangen_sql(self, expression: exp.RangeN) -> str:\n this = self.sql(expression, \"this\")\n expressions_sql = self.expressions(expression)\n each_sql = self.sql(expression, \"each\")\n each_sql = f\" EACH {each_sql}\" if each_sql else \"\"\n\n return f\"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})\"\n\n def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:\n kind = self.sql(expression, \"kind\").upper()\n if kind == \"TABLE\" and locations.get(exp.Properties.Location.POST_NAME):\n this_name = self.sql(expression.this, \"this\")\n this_properties = self.properties(\n exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),\n wrapped=False,\n prefix=\",\",\n )\n this_schema = self.schema_columns_sql(expression.this)\n return f\"{this_name}{this_properties}{self.sep()}{this_schema}\"\n\n return super().createable_sql(expression, locations)\n", "path": "sqlglot/dialects/teradata.py"}]} | 3,224 | 250 |
gh_patches_debug_16331 | rasdani/github-patches | git_diff | dask__distributed-2921 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[UI][FEATURE] Invalid worker or task keys should return 404 instead of 500 HTTP status code
E.g. http://host:8787/info/task/foobared.html returns `500: Internal Server Error`.
Should instead return a friendlier 404 page.
</issue>
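A generic Tornado sketch of the requested behaviour (hypothetical handler and names, not dask's actual classes): look the key up first and answer 404 for unknown workers or tasks instead of letting an unhandled lookup error surface as a 500.

```python
from tornado import web


class TaskPage(web.RequestHandler):
    def initialize(self, tasks):
        self.tasks = tasks               # assumed mapping: task key -> task state

    def get(self, key):
        if key not in self.tasks:
            self.send_error(404)         # friendly "not found" instead of a 500
            return
        self.write(f"task {key}: {self.tasks[key]}")
```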
<code>
[start of distributed/dashboard/scheduler_html.py]
1 from datetime import datetime
2
3 from dask.utils import format_bytes
4 import toolz
5 from tornado import escape
6
7 from ..utils import log_errors, format_time
8 from .proxy import GlobalProxyHandler
9 from .utils import RequestHandler, redirect
10
11 ns = {
12 func.__name__: func for func in [format_bytes, format_time, datetime.fromtimestamp]
13 }
14
15
16 class Workers(RequestHandler):
17 def get(self):
18 with log_errors():
19 self.render(
20 "workers.html",
21 title="Workers",
22 scheduler=self.server,
23 **toolz.merge(self.server.__dict__, ns, self.extra)
24 )
25
26
27 class Worker(RequestHandler):
28 def get(self, worker):
29 worker = escape.url_unescape(worker)
30 with log_errors():
31 self.render(
32 "worker.html",
33 title="Worker: " + worker,
34 scheduler=self.server,
35 Worker=worker,
36 **toolz.merge(self.server.__dict__, ns, self.extra)
37 )
38
39
40 class Task(RequestHandler):
41 def get(self, task):
42 task = escape.url_unescape(task)
43 with log_errors():
44 self.render(
45 "task.html",
46 title="Task: " + task,
47 Task=task,
48 scheduler=self.server,
49 **toolz.merge(self.server.__dict__, ns, self.extra)
50 )
51
52
53 class Logs(RequestHandler):
54 def get(self):
55 with log_errors():
56 logs = self.server.get_logs()
57 self.render("logs.html", title="Logs", logs=logs, **self.extra)
58
59
60 class WorkerLogs(RequestHandler):
61 async def get(self, worker):
62 with log_errors():
63 worker = escape.url_unescape(worker)
64 logs = await self.server.get_worker_logs(workers=[worker])
65 logs = logs[worker]
66 self.render("logs.html", title="Logs: " + worker, logs=logs, **self.extra)
67
68
69 class WorkerCallStacks(RequestHandler):
70 async def get(self, worker):
71 with log_errors():
72 worker = escape.url_unescape(worker)
73 keys = self.server.processing[worker]
74 call_stack = await self.server.get_call_stack(keys=keys)
75 self.render(
76 "call-stack.html",
77 title="Call Stacks: " + worker,
78 call_stack=call_stack,
79 **self.extra
80 )
81
82
83 class TaskCallStack(RequestHandler):
84 async def get(self, key):
85 with log_errors():
86 key = escape.url_unescape(key)
87 call_stack = await self.server.get_call_stack(keys=[key])
88 if not call_stack:
89 self.write(
90 "<p>Task not actively running. "
91 "It may be finished or not yet started</p>"
92 )
93 else:
94 self.render(
95 "call-stack.html",
96 title="Call Stack: " + key,
97 call_stack=call_stack,
98 **self.extra
99 )
100
101
102 class CountsJSON(RequestHandler):
103 def get(self):
104 scheduler = self.server
105 erred = 0
106 nbytes = 0
107 nthreads = 0
108 memory = 0
109 processing = 0
110 released = 0
111 waiting = 0
112 waiting_data = 0
113
114 for ts in scheduler.tasks.values():
115 if ts.exception_blame is not None:
116 erred += 1
117 elif ts.state == "released":
118 released += 1
119 if ts.waiting_on:
120 waiting += 1
121 if ts.waiters:
122 waiting_data += 1
123 for ws in scheduler.workers.values():
124 nthreads += ws.nthreads
125 memory += len(ws.has_what)
126 nbytes += ws.nbytes
127 processing += len(ws.processing)
128
129 response = {
130 "bytes": nbytes,
131 "clients": len(scheduler.clients),
132 "cores": nthreads,
133 "erred": erred,
134 "hosts": len(scheduler.host_info),
135 "idle": len(scheduler.idle),
136 "memory": memory,
137 "processing": processing,
138 "released": released,
139 "saturated": len(scheduler.saturated),
140 "tasks": len(scheduler.tasks),
141 "unrunnable": len(scheduler.unrunnable),
142 "waiting": waiting,
143 "waiting_data": waiting_data,
144 "workers": len(scheduler.workers),
145 }
146 self.write(response)
147
148
149 class IdentityJSON(RequestHandler):
150 def get(self):
151 self.write(self.server.identity())
152
153
154 class IndexJSON(RequestHandler):
155 def get(self):
156 with log_errors():
157 r = [url for url, _ in routes if url.endswith(".json")]
158 self.render(
159 "json-index.html", routes=r, title="Index of JSON routes", **self.extra
160 )
161
162
163 class IndividualPlots(RequestHandler):
164 def get(self):
165 bokeh_server = self.server.services["dashboard"]
166 result = {
167 uri.strip("/").replace("-", " ").title(): uri
168 for uri in bokeh_server.apps
169 if uri.lstrip("/").startswith("individual-") and not uri.endswith(".json")
170 }
171 self.write(result)
172
173
174 class _PrometheusCollector(object):
175 def __init__(self, server):
176 self.server = server
177
178 def collect(self):
179 from prometheus_client.core import GaugeMetricFamily
180
181 yield GaugeMetricFamily(
182 "dask_scheduler_workers",
183 "Number of workers connected.",
184 value=len(self.server.workers),
185 )
186 yield GaugeMetricFamily(
187 "dask_scheduler_clients",
188 "Number of clients connected.",
189 value=len(self.server.clients),
190 )
191 yield GaugeMetricFamily(
192 "dask_scheduler_received_tasks",
193 "Number of tasks received at scheduler",
194 value=len(self.server.tasks),
195 )
196 yield GaugeMetricFamily(
197 "dask_scheduler_unrunnable_tasks",
198 "Number of unrunnable tasks at scheduler",
199 value=len(self.server.unrunnable),
200 )
201
202
203 class PrometheusHandler(RequestHandler):
204 _initialized = False
205
206 def __init__(self, *args, **kwargs):
207 import prometheus_client
208
209 super(PrometheusHandler, self).__init__(*args, **kwargs)
210
211 if PrometheusHandler._initialized:
212 return
213
214 prometheus_client.REGISTRY.register(_PrometheusCollector(self.server))
215
216 PrometheusHandler._initialized = True
217
218 def get(self):
219 import prometheus_client
220
221 self.write(prometheus_client.generate_latest())
222 self.set_header("Content-Type", "text/plain; version=0.0.4")
223
224
225 class HealthHandler(RequestHandler):
226 def get(self):
227 self.write("ok")
228 self.set_header("Content-Type", "text/plain")
229
230
231 routes = [
232 (r"info", redirect("info/main/workers.html")),
233 (r"info/main/workers.html", Workers),
234 (r"info/worker/(.*).html", Worker),
235 (r"info/task/(.*).html", Task),
236 (r"info/main/logs.html", Logs),
237 (r"info/call-stacks/(.*).html", WorkerCallStacks),
238 (r"info/call-stack/(.*).html", TaskCallStack),
239 (r"info/logs/(.*).html", WorkerLogs),
240 (r"json/counts.json", CountsJSON),
241 (r"json/identity.json", IdentityJSON),
242 (r"json/index.html", IndexJSON),
243 (r"individual-plots.json", IndividualPlots),
244 (r"metrics", PrometheusHandler),
245 (r"health", HealthHandler),
246 (r"proxy/(\d+)/(.*?)/(.*)", GlobalProxyHandler),
247 ]
248
249
250 def get_handlers(server):
251 return [(url, cls, {"server": server}) for url, cls in routes]
252
[end of distributed/dashboard/scheduler_html.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/dashboard/scheduler_html.py b/distributed/dashboard/scheduler_html.py
--- a/distributed/dashboard/scheduler_html.py
+++ b/distributed/dashboard/scheduler_html.py
@@ -27,6 +27,9 @@
class Worker(RequestHandler):
def get(self, worker):
worker = escape.url_unescape(worker)
+ if worker not in self.server.workers:
+ self.send_error(404)
+ return
with log_errors():
self.render(
"worker.html",
@@ -40,6 +43,9 @@
class Task(RequestHandler):
def get(self, task):
task = escape.url_unescape(task)
+ if task not in self.server.tasks:
+ self.send_error(404)
+ return
with log_errors():
self.render(
"task.html",
| {"golden_diff": "diff --git a/distributed/dashboard/scheduler_html.py b/distributed/dashboard/scheduler_html.py\n--- a/distributed/dashboard/scheduler_html.py\n+++ b/distributed/dashboard/scheduler_html.py\n@@ -27,6 +27,9 @@\n class Worker(RequestHandler):\n def get(self, worker):\n worker = escape.url_unescape(worker)\n+ if worker not in self.server.workers:\n+ self.send_error(404)\n+ return\n with log_errors():\n self.render(\n \"worker.html\",\n@@ -40,6 +43,9 @@\n class Task(RequestHandler):\n def get(self, task):\n task = escape.url_unescape(task)\n+ if task not in self.server.tasks:\n+ self.send_error(404)\n+ return\n with log_errors():\n self.render(\n \"task.html\",\n", "issue": "[UI][FEATURE] Invalid worker or task keys should return 404 instead of 500 HTML status code\nE.g. http://host:8787/info/task/foobared.html returns `500: Internal Server Error`.\r\n\r\nShould instead return a friendlier 404 page.\n", "before_files": [{"content": "from datetime import datetime\n\nfrom dask.utils import format_bytes\nimport toolz\nfrom tornado import escape\n\nfrom ..utils import log_errors, format_time\nfrom .proxy import GlobalProxyHandler\nfrom .utils import RequestHandler, redirect\n\nns = {\n func.__name__: func for func in [format_bytes, format_time, datetime.fromtimestamp]\n}\n\n\nclass Workers(RequestHandler):\n def get(self):\n with log_errors():\n self.render(\n \"workers.html\",\n title=\"Workers\",\n scheduler=self.server,\n **toolz.merge(self.server.__dict__, ns, self.extra)\n )\n\n\nclass Worker(RequestHandler):\n def get(self, worker):\n worker = escape.url_unescape(worker)\n with log_errors():\n self.render(\n \"worker.html\",\n title=\"Worker: \" + worker,\n scheduler=self.server,\n Worker=worker,\n **toolz.merge(self.server.__dict__, ns, self.extra)\n )\n\n\nclass Task(RequestHandler):\n def get(self, task):\n task = escape.url_unescape(task)\n with log_errors():\n self.render(\n \"task.html\",\n title=\"Task: \" + task,\n Task=task,\n scheduler=self.server,\n **toolz.merge(self.server.__dict__, ns, self.extra)\n )\n\n\nclass Logs(RequestHandler):\n def get(self):\n with log_errors():\n logs = self.server.get_logs()\n self.render(\"logs.html\", title=\"Logs\", logs=logs, **self.extra)\n\n\nclass WorkerLogs(RequestHandler):\n async def get(self, worker):\n with log_errors():\n worker = escape.url_unescape(worker)\n logs = await self.server.get_worker_logs(workers=[worker])\n logs = logs[worker]\n self.render(\"logs.html\", title=\"Logs: \" + worker, logs=logs, **self.extra)\n\n\nclass WorkerCallStacks(RequestHandler):\n async def get(self, worker):\n with log_errors():\n worker = escape.url_unescape(worker)\n keys = self.server.processing[worker]\n call_stack = await self.server.get_call_stack(keys=keys)\n self.render(\n \"call-stack.html\",\n title=\"Call Stacks: \" + worker,\n call_stack=call_stack,\n **self.extra\n )\n\n\nclass TaskCallStack(RequestHandler):\n async def get(self, key):\n with log_errors():\n key = escape.url_unescape(key)\n call_stack = await self.server.get_call_stack(keys=[key])\n if not call_stack:\n self.write(\n \"<p>Task not actively running. 
\"\n \"It may be finished or not yet started</p>\"\n )\n else:\n self.render(\n \"call-stack.html\",\n title=\"Call Stack: \" + key,\n call_stack=call_stack,\n **self.extra\n )\n\n\nclass CountsJSON(RequestHandler):\n def get(self):\n scheduler = self.server\n erred = 0\n nbytes = 0\n nthreads = 0\n memory = 0\n processing = 0\n released = 0\n waiting = 0\n waiting_data = 0\n\n for ts in scheduler.tasks.values():\n if ts.exception_blame is not None:\n erred += 1\n elif ts.state == \"released\":\n released += 1\n if ts.waiting_on:\n waiting += 1\n if ts.waiters:\n waiting_data += 1\n for ws in scheduler.workers.values():\n nthreads += ws.nthreads\n memory += len(ws.has_what)\n nbytes += ws.nbytes\n processing += len(ws.processing)\n\n response = {\n \"bytes\": nbytes,\n \"clients\": len(scheduler.clients),\n \"cores\": nthreads,\n \"erred\": erred,\n \"hosts\": len(scheduler.host_info),\n \"idle\": len(scheduler.idle),\n \"memory\": memory,\n \"processing\": processing,\n \"released\": released,\n \"saturated\": len(scheduler.saturated),\n \"tasks\": len(scheduler.tasks),\n \"unrunnable\": len(scheduler.unrunnable),\n \"waiting\": waiting,\n \"waiting_data\": waiting_data,\n \"workers\": len(scheduler.workers),\n }\n self.write(response)\n\n\nclass IdentityJSON(RequestHandler):\n def get(self):\n self.write(self.server.identity())\n\n\nclass IndexJSON(RequestHandler):\n def get(self):\n with log_errors():\n r = [url for url, _ in routes if url.endswith(\".json\")]\n self.render(\n \"json-index.html\", routes=r, title=\"Index of JSON routes\", **self.extra\n )\n\n\nclass IndividualPlots(RequestHandler):\n def get(self):\n bokeh_server = self.server.services[\"dashboard\"]\n result = {\n uri.strip(\"/\").replace(\"-\", \" \").title(): uri\n for uri in bokeh_server.apps\n if uri.lstrip(\"/\").startswith(\"individual-\") and not uri.endswith(\".json\")\n }\n self.write(result)\n\n\nclass _PrometheusCollector(object):\n def __init__(self, server):\n self.server = server\n\n def collect(self):\n from prometheus_client.core import GaugeMetricFamily\n\n yield GaugeMetricFamily(\n \"dask_scheduler_workers\",\n \"Number of workers connected.\",\n value=len(self.server.workers),\n )\n yield GaugeMetricFamily(\n \"dask_scheduler_clients\",\n \"Number of clients connected.\",\n value=len(self.server.clients),\n )\n yield GaugeMetricFamily(\n \"dask_scheduler_received_tasks\",\n \"Number of tasks received at scheduler\",\n value=len(self.server.tasks),\n )\n yield GaugeMetricFamily(\n \"dask_scheduler_unrunnable_tasks\",\n \"Number of unrunnable tasks at scheduler\",\n value=len(self.server.unrunnable),\n )\n\n\nclass PrometheusHandler(RequestHandler):\n _initialized = False\n\n def __init__(self, *args, **kwargs):\n import prometheus_client\n\n super(PrometheusHandler, self).__init__(*args, **kwargs)\n\n if PrometheusHandler._initialized:\n return\n\n prometheus_client.REGISTRY.register(_PrometheusCollector(self.server))\n\n PrometheusHandler._initialized = True\n\n def get(self):\n import prometheus_client\n\n self.write(prometheus_client.generate_latest())\n self.set_header(\"Content-Type\", \"text/plain; version=0.0.4\")\n\n\nclass HealthHandler(RequestHandler):\n def get(self):\n self.write(\"ok\")\n self.set_header(\"Content-Type\", \"text/plain\")\n\n\nroutes = [\n (r\"info\", redirect(\"info/main/workers.html\")),\n (r\"info/main/workers.html\", Workers),\n (r\"info/worker/(.*).html\", Worker),\n (r\"info/task/(.*).html\", Task),\n (r\"info/main/logs.html\", Logs),\n 
(r\"info/call-stacks/(.*).html\", WorkerCallStacks),\n (r\"info/call-stack/(.*).html\", TaskCallStack),\n (r\"info/logs/(.*).html\", WorkerLogs),\n (r\"json/counts.json\", CountsJSON),\n (r\"json/identity.json\", IdentityJSON),\n (r\"json/index.html\", IndexJSON),\n (r\"individual-plots.json\", IndividualPlots),\n (r\"metrics\", PrometheusHandler),\n (r\"health\", HealthHandler),\n (r\"proxy/(\\d+)/(.*?)/(.*)\", GlobalProxyHandler),\n]\n\n\ndef get_handlers(server):\n return [(url, cls, {\"server\": server}) for url, cls in routes]\n", "path": "distributed/dashboard/scheduler_html.py"}]} | 2,887 | 183 |
gh_patches_debug_3036 | rasdani/github-patches | git_diff | archlinux__archinstall-184 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gnome-extra provides WAY too much bloatware
I can't imagine most people wanting all the packages this installs on a new installation. Most of these applications are things like games and advanced tools like dconf-editor that your average user should not be touching. Some of them are nice to have but can be installed later manually instead of during initial installation.
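For what it's worth, a leaner default could look something like this sketch (the exact package picks are only a suggestion, and `installation` is the object the installer already provides to these profile scripts):

```python
# profiles/applications/gnome.py -- illustrative sketch, not a final list
import archinstall

# Core desktop, the display manager and a couple of commonly wanted tools;
# games and niche utilities from gnome-extra are left for users to add later.
installation.add_additional_packages("gnome gnome-tweaks gdm")
# Note: gdm should be part of the gnome group, but adding it here for clarity
```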
</issue>
<code>
[start of profiles/applications/gnome.py]
1 import archinstall
2
3 installation.add_additional_packages("gnome gnome-extra gdm") # We'll create a gnome-minimal later, but for now, we'll avoid issues by giving more than we need.
4 # Note: gdm should be part of the gnome group, but adding it here for clarity
[end of profiles/applications/gnome.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/profiles/applications/gnome.py b/profiles/applications/gnome.py
--- a/profiles/applications/gnome.py
+++ b/profiles/applications/gnome.py
@@ -1,4 +1,4 @@
import archinstall
-installation.add_additional_packages("gnome gnome-extra gdm") # We'll create a gnome-minimal later, but for now, we'll avoid issues by giving more than we need.
-# Note: gdm should be part of the gnome group, but adding it here for clarity
\ No newline at end of file
+installation.add_additional_packages("gnome gnome-tweaks gnome-todo gnome-sound-recorder evolution gdm")
+# Note: gdm should be part of the gnome group, but adding it here for clarity
| {"golden_diff": "diff --git a/profiles/applications/gnome.py b/profiles/applications/gnome.py\n--- a/profiles/applications/gnome.py\n+++ b/profiles/applications/gnome.py\n@@ -1,4 +1,4 @@\n import archinstall\n \n-installation.add_additional_packages(\"gnome gnome-extra gdm\") # We'll create a gnome-minimal later, but for now, we'll avoid issues by giving more than we need.\n-# Note: gdm should be part of the gnome group, but adding it here for clarity\n\\ No newline at end of file\n+installation.add_additional_packages(\"gnome gnome-tweaks gnome-todo gnome-sound-recorder evolution gdm\")\n+# Note: gdm should be part of the gnome group, but adding it here for clarity\n", "issue": "gnome-extra provides WAY too much bloatware\nI can't imagine most people wanting all the packages this installs on a new installation. Most of these applications are things like games and advanced tools like dconf-editor that your average user should not be touching. Some of them are nice to have but can be installed later manually instead of during initial installation.\n", "before_files": [{"content": "import archinstall\n\ninstallation.add_additional_packages(\"gnome gnome-extra gdm\") # We'll create a gnome-minimal later, but for now, we'll avoid issues by giving more than we need.\n# Note: gdm should be part of the gnome group, but adding it here for clarity", "path": "profiles/applications/gnome.py"}]} | 673 | 165 |
gh_patches_debug_6449 | rasdani/github-patches | git_diff | getpelican__pelican-2818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Plugins run twice in autoreload mode
<!--
Hi there! Thank you for discovering and submitting an issue.
Before you submit this, let’s make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill them first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I have read the [Filing Issues](https://docs.getpelican.com/en/latest/contribute.html#filing-issues) and subsequent “How to Get Help” sections of the documentation.
- [x] I have searched the [issues](https://github.com/getpelican/pelican/issues?q=is%3Aissue) (including closed ones) and believe that this is not a duplicate.
<!--
Once the above boxes are checked, if you are able to fill in the following list
with your information, it would be very helpful for maintainers.
-->
- **OS version and name**: Ubuntu 18.04.5 LTS (WSL)
- **Python version**: 3.8.6
- **Pelican version**: e4d9c41a
## To reproduce
Create these files:
```
.
├── content
│ └── 1.rst
├── pelicanconf.py
└── plugins
└── test_plugin.py
```
```rst
.. content/1.rst
####
test
####
:date: 1970-01-01
:modified: 1970-01-01
TEST
```
```python
# pelicanconf.py
PLUGINS = [
'test_plugin',
]
PLUGIN_PATHS = [
'plugins',
]
```
```python
# plugins/test_plugin.py
import logging
from pelican import signals
logger = logging.getLogger(__name__)
def test_function(content):
logger.info('test plugin loaded')
test = content._content
test += 'TEST'
content._content = test
logger.info(content._content)
def register():
signals.content_object_init.connect(test_function)
```
Then run:
```console
$ pelican -rD --logs-dedup-min-level DEBUG content
[Not showing unrelated logs]
-> test plugin loaded
-> <p>TEST</p>
| TEST
-> test plugin loaded
-> <p>TEST</p>
| TESTTEST
$ pelican -D --logs-dedup-min-level DEBUG content
[Not showing unrelated logs]
-> test plugin loaded
-> <p>TEST</p>
| TEST
```
The plugin runs twice with `-r`, but only once without `-r`.
## The cause of the issue
After a bisect, I found that commit ed1eca16 introduced this issue.
```console
$ git checkout ed1eca16^
$ pelican -rD --logs-dedup-min-level DEBUG content
[Not showing unrelated logs]
-> test plugin loaded
-> <p>TEST</p>
| TEST
$ git checkout ed1eca16
$ pelican -rD --logs-dedup-min-level DEBUG content
[Not showing unrelated logs]
-> test plugin loaded
-> <p>TEST</p>
| TEST
-> test plugin loaded
-> <p>TEST</p>
| TESTTEST
```
After adding this line, I found that the plugin was registered twice:
```diff
diff --git a/pelican/contents.py b/pelican/contents.py
index 594cd3b5..ff991be9 100644
--- a/pelican/contents.py
+++ b/pelican/contents.py
@@ -139,6 +139,7 @@ class Content(object):
if 'summary' in metadata:
self._summary = metadata['summary']
+ logger.info(str(signals.content_object_init.receivers))
signals.content_object_init.send(self)
def __str__(self):
```
```console
$ pelican -rD --logs-dedup-min-level DEBUG content
[Not showing unrelated logs]
-> {140542259499216: <weakref at 0x7fd28a2b8b80; to 'function' at 0x7fd28b7410d0 (test_function)>, 140542259600400: <weakref at 0x7fd28b7525e0; to 'function' at 0x7fd28b759c10 (test_function)>}
-> test plugin loaded
-> <p>TEST</p>
| TEST
-> test plugin loaded
-> <p>TEST</p>
| TESTTEST
```
But why? The reason:
```python
>>> # Implementation of ed1eca16^
>>> import sys
>>> sys.path.insert(0, 'plugins')
>>> plugin = __import__('test_plugin', globals(), locals(), str('module'))
>>> plugin_ = __import__('test_plugin', globals(), locals(), str('module'))
>>> plugin is plugin_
True
>>>
>>> # Implementation of ed1eca16
>>> import importlib
>>> spec = importlib.machinery.PathFinder.find_spec('test_plugin', ['plugins'])
>>> plugin = importlib.util.module_from_spec(spec)
>>> plugin_ = importlib.util.module_from_spec(spec)
>>> plugin is plugin_
False
```
## How to fix
To fix it, simply avoid loading the same plugin twice:
```diff
diff --git a/pelican/plugins/_utils.py b/pelican/plugins/_utils.py
index 4e6ec3c5..699192d3 100644
--- a/pelican/plugins/_utils.py
+++ b/pelican/plugins/_utils.py
@@ -53,6 +53,11 @@ def load_legacy_plugin(plugin, plugin_paths):
if spec is None:
raise ImportError('Cannot import plugin `{}`'.format(plugin))
else:
+ # Avoid loading the same plugin twice
+ try:
+ return sys.modules[spec.name]
+ except KeyError:
+ pass
# create module object from spec
mod = importlib.util.module_from_spec(spec)
# place it into sys.modules cache
```
I will create a pull request after writing some tests for it.
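For the test, something small along these lines should be enough to lock the behaviour in (file layout and names here are just illustrative):

```python
import sys

from pelican.plugins._utils import load_legacy_plugin


def test_legacy_plugin_is_loaded_only_once(tmp_path):
    # a throwaway legacy plugin on disk
    (tmp_path / 'dummy_legacy_plugin.py').write_text('def register():\n    pass\n')

    first = load_legacy_plugin('dummy_legacy_plugin', [str(tmp_path)])
    second = load_legacy_plugin('dummy_legacy_plugin', [str(tmp_path)])

    # without the fix these are two distinct module objects, so any signal
    # receivers the plugin registers end up connected twice
    assert first is second
    assert sys.modules['dummy_legacy_plugin'] is first
```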
</issue>
<code>
[start of pelican/plugins/_utils.py]
1 import importlib
2 import importlib.machinery
3 import importlib.util
4 import logging
5 import pkgutil
6 import sys
7
8
9 logger = logging.getLogger(__name__)
10
11
12 def iter_namespace(ns_pkg):
13 # Specifying the second argument (prefix) to iter_modules makes the
14 # returned name an absolute name instead of a relative one. This allows
15 # import_module to work without having to do additional modification to
16 # the name.
17 return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")
18
19
20 def get_namespace_plugins(ns_pkg=None):
21 if ns_pkg is None:
22 import pelican.plugins as ns_pkg
23
24 return {
25 name: importlib.import_module(name)
26 for finder, name, ispkg
27 in iter_namespace(ns_pkg)
28 if ispkg
29 }
30
31
32 def list_plugins(ns_pkg=None):
33 from pelican.log import init as init_logging
34 init_logging(logging.INFO)
35 ns_plugins = get_namespace_plugins(ns_pkg)
36 if ns_plugins:
37 logger.info('Plugins found:\n' + '\n'.join(ns_plugins))
38 else:
39 logger.info('No plugins are installed')
40
41
42 def load_legacy_plugin(plugin, plugin_paths):
43 if '.' in plugin:
44 # it is in a package, try to resolve package first
45 package, _, _ = plugin.rpartition('.')
46 load_legacy_plugin(package, plugin_paths)
47
48 # Try to find plugin in PLUGIN_PATHS
49 spec = importlib.machinery.PathFinder.find_spec(plugin, plugin_paths)
50 if spec is None:
51 # If failed, try to find it in normal importable locations
52 spec = importlib.util.find_spec(plugin)
53 if spec is None:
54 raise ImportError('Cannot import plugin `{}`'.format(plugin))
55 else:
56 # create module object from spec
57 mod = importlib.util.module_from_spec(spec)
58 # place it into sys.modules cache
59 # necessary if module imports itself at some point (e.g. packages)
60 sys.modules[spec.name] = mod
61 try:
62 # try to execute it inside module object
63 spec.loader.exec_module(mod)
64 except Exception: # problem with import
65 try:
66 # remove module from sys.modules since it can't be loaded
67 del sys.modules[spec.name]
68 except KeyError:
69 pass
70 raise
71
72 # if all went well, we have the plugin module
73 return mod
74
75
76 def load_plugins(settings):
77 logger.debug('Finding namespace plugins')
78 namespace_plugins = get_namespace_plugins()
79 if namespace_plugins:
80 logger.debug('Namespace plugins found:\n' +
81 '\n'.join(namespace_plugins))
82 plugins = []
83 if settings.get('PLUGINS') is not None:
84 for plugin in settings['PLUGINS']:
85 if isinstance(plugin, str):
86 logger.debug('Loading plugin `%s`', plugin)
87 # try to find in namespace plugins
88 if plugin in namespace_plugins:
89 plugin = namespace_plugins[plugin]
90 elif 'pelican.plugins.{}'.format(plugin) in namespace_plugins:
91 plugin = namespace_plugins['pelican.plugins.{}'.format(
92 plugin)]
93 # try to import it
94 else:
95 try:
96 plugin = load_legacy_plugin(
97 plugin,
98 settings.get('PLUGIN_PATHS', []))
99 except ImportError as e:
100 logger.error('Cannot load plugin `%s`\n%s', plugin, e)
101 continue
102 plugins.append(plugin)
103 else:
104 plugins = list(namespace_plugins.values())
105
106 return plugins
107
[end of pelican/plugins/_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pelican/plugins/_utils.py b/pelican/plugins/_utils.py
--- a/pelican/plugins/_utils.py
+++ b/pelican/plugins/_utils.py
@@ -53,6 +53,9 @@
if spec is None:
raise ImportError('Cannot import plugin `{}`'.format(plugin))
else:
+ # Avoid loading the same plugin twice
+ if spec.name in sys.modules:
+ return sys.modules[spec.name]
# create module object from spec
mod = importlib.util.module_from_spec(spec)
# place it into sys.modules cache
| {"golden_diff": "diff --git a/pelican/plugins/_utils.py b/pelican/plugins/_utils.py\n--- a/pelican/plugins/_utils.py\n+++ b/pelican/plugins/_utils.py\n@@ -53,6 +53,9 @@\n if spec is None:\n raise ImportError('Cannot import plugin `{}`'.format(plugin))\n else:\n+ # Avoid loading the same plugin twice\n+ if spec.name in sys.modules:\n+ return sys.modules[spec.name]\n # create module object from spec\n mod = importlib.util.module_from_spec(spec)\n # place it into sys.modules cache\n", "issue": "Plugins run twice in autoreload mode\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this, let\u2019s make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill them first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I have read the [Filing Issues](https://docs.getpelican.com/en/latest/contribute.html#filing-issues) and subsequent \u201cHow to Get Help\u201d sections of the documentation.\r\n- [x] I have searched the [issues](https://github.com/getpelican/pelican/issues?q=is%3Aissue) (including closed ones) and believe that this is not a duplicate.\r\n\r\n<!--\r\n Once the above boxes are checked, if you are able to fill in the following list\r\n with your information, it would be very helpful for maintainers.\r\n-->\r\n\r\n- **OS version and name**: Ubuntu 18.04.5 LTS (WSL)\r\n- **Python version**: 3.8.6\r\n- **Pelican version**: e4d9c41a\r\n\r\n## To reproduce\r\n\r\nCreate these files:\r\n\r\n```\r\n.\r\n\u251c\u2500\u2500 content\r\n\u2502 \u2514\u2500\u2500 1.rst\r\n\u251c\u2500\u2500 pelicanconf.py\r\n\u2514\u2500\u2500 plugins\r\n \u2514\u2500\u2500 test_plugin.py\r\n```\r\n\r\n```rst\r\n.. content/1.rst\r\n####\r\ntest\r\n####\r\n\r\n:date: 1970-01-01\r\n:modified: 1970-01-01\r\n\r\nTEST\r\n```\r\n\r\n```python\r\n# pelicanconf.py\r\nPLUGINS = [\r\n 'test_plugin',\r\n]\r\nPLUGIN_PATHS = [\r\n 'plugins',\r\n]\r\n```\r\n\r\n```python\r\n# plugins/test_plugin.py\r\nimport logging\r\n\r\nfrom pelican import signals\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef test_function(content):\r\n logger.info('test plugin loaded')\r\n test = content._content\r\n test += 'TEST'\r\n content._content = test\r\n logger.info(content._content)\r\n\r\n\r\ndef register():\r\n signals.content_object_init.connect(test_function)\r\n```\r\n\r\nThen run:\r\n\r\n```console\r\n$ pelican -rD --logs-dedup-min-level DEBUG content\r\n[Not showing unrelated logs]\r\n-> test plugin loaded\r\n-> <p>TEST</p>\r\n | TEST\r\n-> test plugin loaded\r\n-> <p>TEST</p>\r\n | TESTTEST\r\n\r\n$ pelican -D --logs-dedup-min-level DEBUG content\r\n[Not showing unrelated logs]\r\n-> test plugin loaded\r\n-> <p>TEST</p>\r\n | TEST\r\n```\r\n\r\nThe plugin runs twice with `-r`, but only once without `-r`.\r\n\r\n## The casue of the issue\r\n\r\nAfter a bisect, I found that commit ed1eca16 introduced this issue.\r\n\r\n```console\r\n$ git checkout ed1eca16^\r\n$ pelican -rD --logs-dedup-min-level DEBUG content\r\n[Not showing unrelated logs]\r\n-> test plugin loaded\r\n-> <p>TEST</p>\r\n | TEST\r\n\r\n$ git checkout ed1eca16\r\n$ pelican -rD --logs-dedup-min-level DEBUG content\r\n[Not showing unrelated logs]\r\n-> test plugin loaded\r\n-> <p>TEST</p>\r\n | TEST\r\n-> test plugin loaded\r\n-> <p>TEST</p>\r\n | TESTTEST\r\n```\r\n\r\nAfter adding this line, I found that the plugin was registered twice:\r\n\r\n```diff\r\ndiff --git a/pelican/contents.py 
b/pelican/contents.py\r\nindex 594cd3b5..ff991be9 100644\r\n--- a/pelican/contents.py\r\n+++ b/pelican/contents.py\r\n@@ -139,6 +139,7 @@ class Content(object):\r\n if 'summary' in metadata:\r\n self._summary = metadata['summary']\r\n\r\n+ logger.info(str(signals.content_object_init.receivers))\r\n signals.content_object_init.send(self)\r\n\r\n def __str__(self):\r\n```\r\n\r\n```console\r\n$ pelican -rD --logs-dedup-min-level DEBUG content\r\n[Not showing unrelated logs]\r\n-> {140542259499216: <weakref at 0x7fd28a2b8b80; to 'function' at 0x7fd28b7410d0 (test_function)>, 140542259600400: <weakref at 0x7fd28b7525e0; to 'function' at 0x7fd28b759c10 (test_function)>}\r\n-> test plugin loaded\r\n-> <p>TEST</p>\r\n | TEST\r\n-> test plugin loaded\r\n-> <p>TEST</p>\r\n | TESTTEST\r\n```\r\n\r\nBut why? The reason:\r\n\r\n```python\r\n>>> # Implementation of ed1eca16^\r\n>>> import sys\r\n>>> sys.path.insert(0, 'plugins')\r\n>>> plugin = __import__('test_plugin', globals(), locals(), str('module'))\r\n>>> plugin_ = __import__('test_plugin', globals(), locals(), str('module'))\r\n>>> plugin is plugin_\r\nTrue\r\n>>>\r\n>>> # Implementation of ed1eca16\r\n>>> import importlib\r\n>>> spec = importlib.machinery.PathFinder.find_spec('test_plugin', ['plugins'])\r\n>>> plugin = importlib.util.module_from_spec(spec)\r\n>>> plugin_ = importlib.util.module_from_spec(spec)\r\n>>> plugin is plugin_\r\nFalse\r\n```\r\n\r\n## How to fix\r\n\r\nTo fix it, simply avoid loading the same plugin twice:\r\n\r\n```diff\r\ndiff --git a/pelican/plugins/_utils.py b/pelican/plugins/_utils.py\r\nindex 4e6ec3c5..699192d3 100644\r\n--- a/pelican/plugins/_utils.py\r\n+++ b/pelican/plugins/_utils.py\r\n@@ -53,6 +53,11 @@ def load_legacy_plugin(plugin, plugin_paths):\r\n if spec is None:\r\n raise ImportError('Cannot import plugin `{}`'.format(plugin))\r\n else:\r\n+ # Avoid loading the same plugin twice\r\n+ try:\r\n+ return sys.modules[spec.name]\r\n+ except KeyError:\r\n+ pass\r\n # create module object from spec\r\n mod = importlib.util.module_from_spec(spec)\r\n # place it into sys.modules cache\r\n```\r\n\r\nI will create a pull request after writing some tests for it.\r\n\r\n\n", "before_files": [{"content": "import importlib\nimport importlib.machinery\nimport importlib.util\nimport logging\nimport pkgutil\nimport sys\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef iter_namespace(ns_pkg):\n # Specifying the second argument (prefix) to iter_modules makes the\n # returned name an absolute name instead of a relative one. This allows\n # import_module to work without having to do additional modification to\n # the name.\n return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + \".\")\n\n\ndef get_namespace_plugins(ns_pkg=None):\n if ns_pkg is None:\n import pelican.plugins as ns_pkg\n\n return {\n name: importlib.import_module(name)\n for finder, name, ispkg\n in iter_namespace(ns_pkg)\n if ispkg\n }\n\n\ndef list_plugins(ns_pkg=None):\n from pelican.log import init as init_logging\n init_logging(logging.INFO)\n ns_plugins = get_namespace_plugins(ns_pkg)\n if ns_plugins:\n logger.info('Plugins found:\\n' + '\\n'.join(ns_plugins))\n else:\n logger.info('No plugins are installed')\n\n\ndef load_legacy_plugin(plugin, plugin_paths):\n if '.' 
in plugin:\n # it is in a package, try to resolve package first\n package, _, _ = plugin.rpartition('.')\n load_legacy_plugin(package, plugin_paths)\n\n # Try to find plugin in PLUGIN_PATHS\n spec = importlib.machinery.PathFinder.find_spec(plugin, plugin_paths)\n if spec is None:\n # If failed, try to find it in normal importable locations\n spec = importlib.util.find_spec(plugin)\n if spec is None:\n raise ImportError('Cannot import plugin `{}`'.format(plugin))\n else:\n # create module object from spec\n mod = importlib.util.module_from_spec(spec)\n # place it into sys.modules cache\n # necessary if module imports itself at some point (e.g. packages)\n sys.modules[spec.name] = mod\n try:\n # try to execute it inside module object\n spec.loader.exec_module(mod)\n except Exception: # problem with import\n try:\n # remove module from sys.modules since it can't be loaded\n del sys.modules[spec.name]\n except KeyError:\n pass\n raise\n\n # if all went well, we have the plugin module\n return mod\n\n\ndef load_plugins(settings):\n logger.debug('Finding namespace plugins')\n namespace_plugins = get_namespace_plugins()\n if namespace_plugins:\n logger.debug('Namespace plugins found:\\n' +\n '\\n'.join(namespace_plugins))\n plugins = []\n if settings.get('PLUGINS') is not None:\n for plugin in settings['PLUGINS']:\n if isinstance(plugin, str):\n logger.debug('Loading plugin `%s`', plugin)\n # try to find in namespace plugins\n if plugin in namespace_plugins:\n plugin = namespace_plugins[plugin]\n elif 'pelican.plugins.{}'.format(plugin) in namespace_plugins:\n plugin = namespace_plugins['pelican.plugins.{}'.format(\n plugin)]\n # try to import it\n else:\n try:\n plugin = load_legacy_plugin(\n plugin,\n settings.get('PLUGIN_PATHS', []))\n except ImportError as e:\n logger.error('Cannot load plugin `%s`\\n%s', plugin, e)\n continue\n plugins.append(plugin)\n else:\n plugins = list(namespace_plugins.values())\n\n return plugins\n", "path": "pelican/plugins/_utils.py"}]} | 2,886 | 130 |
gh_patches_debug_17972 | rasdani/github-patches | git_diff | urllib3__urllib3-911 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PoolManagers are overly case-sensitive.
In a URI, both scheme and authority are case-insensitive: that is, the scheme, host, and port should all be considered case insensitively (and basically treated as lower-case).
Unfortunately, the PoolManager and ProxyManager do not consistently do that. The biggest problem is in the ProxyManager, where `connection_from_host` will incorrectly treat any HTTPS scheme with a capital letter in it like an HTTP connection (which isn't great), but the flaw is pretty common elsewhere in those objects (e.g. an uppercased host will get a different connection pool than a lowercased host).
At the very minimum we should be using a lowercased scheme at all points in the urllib3 stack. We should also consider whether we want to be case-sensitive about domains (I'm inclined to say that we do not).
Related: kennethreitz/requests#3075.
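Concretely, one minimal place to start would be normalizing inside `Url.__new__`, so every `Url` coming out of `parse_url` already carries a lowercased scheme and host (sketch only; where the normalization ultimately lives is open for discussion):

```python
    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        if path and not path.startswith('/'):
            path = '/' + path
        # Scheme and host are case-insensitive per RFC 3986, so normalize them
        # here; any pool key derived from a Url then sees consistent casing.
        if scheme:
            scheme = scheme.lower()
        if host:
            host = host.lower()
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)
```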
</issue>
<code>
[start of urllib3/util/url.py]
1 from __future__ import absolute_import
2 from collections import namedtuple
3
4 from ..exceptions import LocationParseError
5
6
7 url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
8
9
10 class Url(namedtuple('Url', url_attrs)):
11 """
12 Datastructure for representing an HTTP URL. Used as a return value for
13 :func:`parse_url`.
14 """
15 slots = ()
16
17 def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
18 query=None, fragment=None):
19 if path and not path.startswith('/'):
20 path = '/' + path
21 return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
22 query, fragment)
23
24 @property
25 def hostname(self):
26 """For backwards-compatibility with urlparse. We're nice like that."""
27 return self.host
28
29 @property
30 def request_uri(self):
31 """Absolute path including the query string."""
32 uri = self.path or '/'
33
34 if self.query is not None:
35 uri += '?' + self.query
36
37 return uri
38
39 @property
40 def netloc(self):
41 """Network location including host and port"""
42 if self.port:
43 return '%s:%d' % (self.host, self.port)
44 return self.host
45
46 @property
47 def url(self):
48 """
49 Convert self into a url
50
51 This function should more or less round-trip with :func:`.parse_url`. The
52 returned url may not be exactly the same as the url inputted to
53 :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
54 with a blank port will have : removed).
55
56 Example: ::
57
58 >>> U = parse_url('http://google.com/mail/')
59 >>> U.url
60 'http://google.com/mail/'
61 >>> Url('http', 'username:password', 'host.com', 80,
62 ... '/path', 'query', 'fragment').url
63 'http://username:[email protected]:80/path?query#fragment'
64 """
65 scheme, auth, host, port, path, query, fragment = self
66 url = ''
67
68 # We use "is not None" we want things to happen with empty strings (or 0 port)
69 if scheme is not None:
70 url += scheme + '://'
71 if auth is not None:
72 url += auth + '@'
73 if host is not None:
74 url += host
75 if port is not None:
76 url += ':' + str(port)
77 if path is not None:
78 url += path
79 if query is not None:
80 url += '?' + query
81 if fragment is not None:
82 url += '#' + fragment
83
84 return url
85
86 def __str__(self):
87 return self.url
88
89
90 def split_first(s, delims):
91 """
92 Given a string and an iterable of delimiters, split on the first found
93 delimiter. Return two split parts and the matched delimiter.
94
95 If not found, then the first part is the full input string.
96
97 Example::
98
99 >>> split_first('foo/bar?baz', '?/=')
100 ('foo', 'bar?baz', '/')
101 >>> split_first('foo/bar?baz', '123')
102 ('foo/bar?baz', '', None)
103
104 Scales linearly with number of delims. Not ideal for large number of delims.
105 """
106 min_idx = None
107 min_delim = None
108 for d in delims:
109 idx = s.find(d)
110 if idx < 0:
111 continue
112
113 if min_idx is None or idx < min_idx:
114 min_idx = idx
115 min_delim = d
116
117 if min_idx is None or min_idx < 0:
118 return s, '', None
119
120 return s[:min_idx], s[min_idx + 1:], min_delim
121
122
123 def parse_url(url):
124 """
125 Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
126 performed to parse incomplete urls. Fields not provided will be None.
127
128 Partly backwards-compatible with :mod:`urlparse`.
129
130 Example::
131
132 >>> parse_url('http://google.com/mail/')
133 Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
134 >>> parse_url('google.com:80')
135 Url(scheme=None, host='google.com', port=80, path=None, ...)
136 >>> parse_url('/foo?bar')
137 Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
138 """
139
140 # While this code has overlap with stdlib's urlparse, it is much
141 # simplified for our needs and less annoying.
142 # Additionally, this implementations does silly things to be optimal
143 # on CPython.
144
145 if not url:
146 # Empty
147 return Url()
148
149 scheme = None
150 auth = None
151 host = None
152 port = None
153 path = None
154 fragment = None
155 query = None
156
157 # Scheme
158 if '://' in url:
159 scheme, url = url.split('://', 1)
160
161 # Find the earliest Authority Terminator
162 # (http://tools.ietf.org/html/rfc3986#section-3.2)
163 url, path_, delim = split_first(url, ['/', '?', '#'])
164
165 if delim:
166 # Reassemble the path
167 path = delim + path_
168
169 # Auth
170 if '@' in url:
171 # Last '@' denotes end of auth part
172 auth, url = url.rsplit('@', 1)
173
174 # IPv6
175 if url and url[0] == '[':
176 host, url = url.split(']', 1)
177 host += ']'
178
179 # Port
180 if ':' in url:
181 _host, port = url.split(':', 1)
182
183 if not host:
184 host = _host
185
186 if port:
187 # If given, ports must be integers.
188 if not port.isdigit():
189 raise LocationParseError(url)
190 port = int(port)
191 else:
192 # Blank ports are cool, too. (rfc3986#section-3.2.3)
193 port = None
194
195 elif not host and url:
196 host = url
197
198 if not path:
199 return Url(scheme, auth, host, port, path, query, fragment)
200
201 # Fragment
202 if '#' in path:
203 path, fragment = path.split('#', 1)
204
205 # Query
206 if '?' in path:
207 path, query = path.split('?', 1)
208
209 return Url(scheme, auth, host, port, path, query, fragment)
210
211
212 def get_host(url):
213 """
214 Deprecated. Use :func:`parse_url` instead.
215 """
216 p = parse_url(url)
217 return p.scheme or 'http', p.hostname, p.port
218
[end of urllib3/util/url.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/urllib3/util/url.py b/urllib3/util/url.py
--- a/urllib3/util/url.py
+++ b/urllib3/util/url.py
@@ -10,7 +10,8 @@
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
- :func:`parse_url`.
+ :func:`parse_url`. Both the scheme and host are normalized as they are
+ both case-insensitive according to RFC 3986.
"""
slots = ()
@@ -18,6 +19,10 @@
query=None, fragment=None):
if path and not path.startswith('/'):
path = '/' + path
+ if scheme:
+ scheme = scheme.lower()
+ if host:
+ host = host.lower()
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
| {"golden_diff": "diff --git a/urllib3/util/url.py b/urllib3/util/url.py\n--- a/urllib3/util/url.py\n+++ b/urllib3/util/url.py\n@@ -10,7 +10,8 @@\n class Url(namedtuple('Url', url_attrs)):\n \"\"\"\n Datastructure for representing an HTTP URL. Used as a return value for\n- :func:`parse_url`.\n+ :func:`parse_url`. Both the scheme and host are normalized as they are\n+ both case-insensitive according to RFC 3986.\n \"\"\"\n slots = ()\n \n@@ -18,6 +19,10 @@\n query=None, fragment=None):\n if path and not path.startswith('/'):\n path = '/' + path\n+ if scheme:\n+ scheme = scheme.lower()\n+ if host:\n+ host = host.lower()\n return super(Url, cls).__new__(cls, scheme, auth, host, port, path,\n query, fragment)\n", "issue": "PoolManagers are overly case-sensitive.\nIn a URI, both scheme and authority are case-insensitive: that is, the scheme, host, and port should all be considered case insensitively (and basically treated as lower-case).\n\nUnfortunately, the PoolManager and ProxyManager do not consistently do that. The biggest problem is in the ProxyManager, where `connection_from_host` will incorrectly treat any HTTPS scheme with a capital letter in it like a HTTP connection (which isn't great), but the flaw is pretty common elsewhere in those objects (e.g. an uppercased host will get a different connection pool to a lowercased host).\n\nAt the very minimum we should be using a lowercased scheme at all points in the urllib3 stack. We should also consider whether we want to be case-sensitive about domains (I'm inclined to say that we do not).\n\nRelated: kennethreitz/requests#3075.\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom collections import namedtuple\n\nfrom ..exceptions import LocationParseError\n\n\nurl_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']\n\n\nclass Url(namedtuple('Url', url_attrs)):\n \"\"\"\n Datastructure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`.\n \"\"\"\n slots = ()\n\n def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,\n query=None, fragment=None):\n if path and not path.startswith('/'):\n path = '/' + path\n return super(Url, cls).__new__(cls, scheme, auth, host, port, path,\n query, fragment)\n\n @property\n def hostname(self):\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self):\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or '/'\n\n if self.query is not None:\n uri += '?' + self.query\n\n return uri\n\n @property\n def netloc(self):\n \"\"\"Network location including host and port\"\"\"\n if self.port:\n return '%s:%d' % (self.host, self.port)\n return self.host\n\n @property\n def url(self):\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example: ::\n\n >>> U = parse_url('http://google.com/mail/')\n >>> U.url\n 'http://google.com/mail/'\n >>> Url('http', 'username:password', 'host.com', 80,\n ... 
'/path', 'query', 'fragment').url\n 'http://username:[email protected]:80/path?query#fragment'\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = ''\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + '://'\n if auth is not None:\n url += auth + '@'\n if host is not None:\n url += host\n if port is not None:\n url += ':' + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += '?' + query\n if fragment is not None:\n url += '#' + fragment\n\n return url\n\n def __str__(self):\n return self.url\n\n\ndef split_first(s, delims):\n \"\"\"\n Given a string and an iterable of delimiters, split on the first found\n delimiter. Return two split parts and the matched delimiter.\n\n If not found, then the first part is the full input string.\n\n Example::\n\n >>> split_first('foo/bar?baz', '?/=')\n ('foo', 'bar?baz', '/')\n >>> split_first('foo/bar?baz', '123')\n ('foo/bar?baz', '', None)\n\n Scales linearly with number of delims. Not ideal for large number of delims.\n \"\"\"\n min_idx = None\n min_delim = None\n for d in delims:\n idx = s.find(d)\n if idx < 0:\n continue\n\n if min_idx is None or idx < min_idx:\n min_idx = idx\n min_delim = d\n\n if min_idx is None or min_idx < 0:\n return s, '', None\n\n return s[:min_idx], s[min_idx + 1:], min_delim\n\n\ndef parse_url(url):\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. Fields not provided will be None.\n\n Partly backwards-compatible with :mod:`urlparse`.\n\n Example::\n\n >>> parse_url('http://google.com/mail/')\n Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n >>> parse_url('google.com:80')\n Url(scheme=None, host='google.com', port=80, path=None, ...)\n >>> parse_url('/foo?bar')\n Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n\n # While this code has overlap with stdlib's urlparse, it is much\n # simplified for our needs and less annoying.\n # Additionally, this implementations does silly things to be optimal\n # on CPython.\n\n if not url:\n # Empty\n return Url()\n\n scheme = None\n auth = None\n host = None\n port = None\n path = None\n fragment = None\n query = None\n\n # Scheme\n if '://' in url:\n scheme, url = url.split('://', 1)\n\n # Find the earliest Authority Terminator\n # (http://tools.ietf.org/html/rfc3986#section-3.2)\n url, path_, delim = split_first(url, ['/', '?', '#'])\n\n if delim:\n # Reassemble the path\n path = delim + path_\n\n # Auth\n if '@' in url:\n # Last '@' denotes end of auth part\n auth, url = url.rsplit('@', 1)\n\n # IPv6\n if url and url[0] == '[':\n host, url = url.split(']', 1)\n host += ']'\n\n # Port\n if ':' in url:\n _host, port = url.split(':', 1)\n\n if not host:\n host = _host\n\n if port:\n # If given, ports must be integers.\n if not port.isdigit():\n raise LocationParseError(url)\n port = int(port)\n else:\n # Blank ports are cool, too. (rfc3986#section-3.2.3)\n port = None\n\n elif not host and url:\n host = url\n\n if not path:\n return Url(scheme, auth, host, port, path, query, fragment)\n\n # Fragment\n if '#' in path:\n path, fragment = path.split('#', 1)\n\n # Query\n if '?' in path:\n path, query = path.split('?', 1)\n\n return Url(scheme, auth, host, port, path, query, fragment)\n\n\ndef get_host(url):\n \"\"\"\n Deprecated. 
Use :func:`parse_url` instead.\n \"\"\"\n p = parse_url(url)\n return p.scheme or 'http', p.hostname, p.port\n", "path": "urllib3/util/url.py"}]} | 2,789 | 216 |
gh_patches_debug_5503 | rasdani/github-patches | git_diff | getsentry__sentry-3604 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New Auth Tokens cannot pass sudo check
The new auth token based API access never passes sudo:
``` python
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey)
```
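Presumably the check just needs to accept the new token type as well, something along these lines (assuming `ApiToken` sits next to `ApiKey` in `sentry.models`):

```python
from sentry.models import ApiKey, ApiToken


def is_considered_sudo(request):
    # A request authenticated with either an API key or one of the new auth
    # tokens should not be forced through the sudo flow.
    return request.is_sudo() or \
        isinstance(request.auth, (ApiKey, ApiToken))
```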
</issue>
<code>
[start of src/sentry/api/decorators.py]
1 from __future__ import absolute_import
2
3 import json
4
5 from django.http import HttpResponse
6 from functools import wraps
7
8 from sentry.models import ApiKey
9
10
11 def is_considered_sudo(request):
12 return request.is_sudo() or \
13 isinstance(request.auth, ApiKey)
14
15
16 def sudo_required(func):
17 @wraps(func)
18 def wrapped(self, request, *args, **kwargs):
19 # If we are already authenticated through an API key we do not
20 # care about the sudo flag.
21 if not is_considered_sudo(request):
22 # TODO(dcramer): support some kind of auth flow to allow this
23 # externally
24 data = {
25 "error": "Account verification required.",
26 "sudoRequired": True,
27 "username": request.user.username,
28 }
29 return HttpResponse(json.dumps(data), status=401)
30 return func(self, request, *args, **kwargs)
31 return wrapped
32
[end of src/sentry/api/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/api/decorators.py b/src/sentry/api/decorators.py
--- a/src/sentry/api/decorators.py
+++ b/src/sentry/api/decorators.py
@@ -5,12 +5,13 @@
from django.http import HttpResponse
from functools import wraps
-from sentry.models import ApiKey
+from sentry.models import ApiKey, ApiToken
def is_considered_sudo(request):
return request.is_sudo() or \
- isinstance(request.auth, ApiKey)
+ isinstance(request.auth, ApiKey) or \
+ isinstance(request.auth, ApiToken)
def sudo_required(func):
| {"golden_diff": "diff --git a/src/sentry/api/decorators.py b/src/sentry/api/decorators.py\n--- a/src/sentry/api/decorators.py\n+++ b/src/sentry/api/decorators.py\n@@ -5,12 +5,13 @@\n from django.http import HttpResponse\n from functools import wraps\n \n-from sentry.models import ApiKey\n+from sentry.models import ApiKey, ApiToken\n \n \n def is_considered_sudo(request):\n return request.is_sudo() or \\\n- isinstance(request.auth, ApiKey)\n+ isinstance(request.auth, ApiKey) or \\\n+ isinstance(request.auth, ApiToken)\n \n \n def sudo_required(func):\n", "issue": "New Auth Tokens cannot pass sudo check\nThe new auth token based API access never passes sudo:\n\n``` python\ndef is_considered_sudo(request):\n return request.is_sudo() or \\\n isinstance(request.auth, ApiKey)\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport json\n\nfrom django.http import HttpResponse\nfrom functools import wraps\n\nfrom sentry.models import ApiKey\n\n\ndef is_considered_sudo(request):\n return request.is_sudo() or \\\n isinstance(request.auth, ApiKey)\n\n\ndef sudo_required(func):\n @wraps(func)\n def wrapped(self, request, *args, **kwargs):\n # If we are already authenticated through an API key we do not\n # care about the sudo flag.\n if not is_considered_sudo(request):\n # TODO(dcramer): support some kind of auth flow to allow this\n # externally\n data = {\n \"error\": \"Account verification required.\",\n \"sudoRequired\": True,\n \"username\": request.user.username,\n }\n return HttpResponse(json.dumps(data), status=401)\n return func(self, request, *args, **kwargs)\n return wrapped\n", "path": "src/sentry/api/decorators.py"}]} | 847 | 141 |
gh_patches_debug_38169 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-506 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
subversion: export fails if dest directory exists
Using the following syntax, the task always fails if /tmp/foo already exists.
`- name: (testing) svn export of desired callflow.xml
subversion: repo=http://repo.local/repos/target/ dest=/tmp/foo/ export=yes`
The error shown is:
`msg: ERROR: /tmp/foo/ folder already exists, but its not a subversion repository.`
This appears to be happening because the subversion module does not account for exporting if the target directory exists (but is not a working copy):
`if not os.path.exists(dest):`
...export and checkout both handled
`elif os.path.exists("%s/.svn" % (dest, )):`
...checkout handled. export presumably not handled because this is a working directory
`else:
module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest, ))`
I think if we insert another case, `elif os.path.exists(dest) and export:`, before the `else` and handle export to a preexisting directory there, it should work.
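Roughly what I have in mind (sketch only; names follow the module as it stands, and `Subversion.export()` would also need to pass `--force` through when the target directory already exists):

```python
    if not os.path.exists(dest):
        before = None
        local_mods = False
        if module.check_mode:
            module.exit_json(changed=True)
        if not export:
            svn.checkout()
        else:
            svn.export()
    elif os.path.exists("%s/.svn" % (dest, )):
        # existing working copy: get local mods, switch, revert, update as before
        ...
    elif export:
        # dest exists but is not a working copy; that is fine for an export,
        # we just refresh the tree (svn itself needs --force for this case)
        before = None
        local_mods = False
        if module.check_mode:
            module.exit_json(changed=True)
        svn.export(force=force)
    else:
        module.fail_json(msg="ERROR: %s folder already exists, but it's not a subversion repository." % (dest, ))
```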
</issue>
<code>
[start of source_control/subversion.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2012, Michael DeHaan <[email protected]>
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20
21 DOCUMENTATION = '''
22 ---
23 module: subversion
24 short_description: Deploys a subversion repository.
25 description:
26 - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout.
27 version_added: "0.7"
28 author: Dane Summers, [email protected]
29 notes:
30 - Requires I(svn) to be installed on the client.
31 requirements: []
32 options:
33 repo:
34 description:
35 - The subversion URL to the repository.
36 required: true
37 aliases: [ name, repository ]
38 default: null
39 dest:
40 description:
41 - Absolute path where the repository should be deployed.
42 required: true
43 default: null
44 revision:
45 description:
46 - Specific revision to checkout.
47 required: false
48 default: HEAD
49 aliases: [ version ]
50 force:
51 description:
52 - If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files.
53 required: false
54 default: "yes"
55 choices: [ "yes", "no" ]
56 username:
57 description:
58 - --username parameter passed to svn.
59 required: false
60 default: null
61 password:
62 description:
63 - --password parameter passed to svn.
64 required: false
65 default: null
66 executable:
67 required: false
68 default: null
69 version_added: "1.4"
70 description:
71 - Path to svn executable to use. If not supplied,
72 the normal mechanism for resolving binary paths will be used.
73 export:
74 required: false
75 default: "no"
76 choices: [ "yes", "no" ]
77 version_added: "1.6"
78 description:
79 - If C(yes), do export instead of checkout/update.
80 '''
81
82 EXAMPLES = '''
83 # Checkout subversion repository to specified folder.
84 - subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout
85
86 # Export subversion directory to folder
87 - subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True
88 '''
89
90 import re
91 import tempfile
92
93
94 class Subversion(object):
95 def __init__(
96 self, module, dest, repo, revision, username, password, svn_path):
97 self.module = module
98 self.dest = dest
99 self.repo = repo
100 self.revision = revision
101 self.username = username
102 self.password = password
103 self.svn_path = svn_path
104
105 def _exec(self, args):
106 bits = [
107 self.svn_path,
108 '--non-interactive',
109 '--trust-server-cert',
110 '--no-auth-cache',
111 ]
112 if self.username:
113 bits.extend(["--username", self.username])
114 if self.password:
115 bits.extend(["--password", self.password])
116 bits.extend(args)
117 rc, out, err = self.module.run_command(bits, check_rc=True)
118 return out.splitlines()
119
120 def checkout(self):
121 '''Creates new svn working directory if it does not already exist.'''
122 self._exec(["checkout", "-r", self.revision, self.repo, self.dest])
123
124 def export(self, force=False):
125 '''Export svn repo to directory'''
126 self._exec(["export", "-r", self.revision, self.repo, self.dest])
127
128 def switch(self):
129 '''Change working directory's repo.'''
130 # switch to ensure we are pointing at correct repo.
131 self._exec(["switch", self.repo, self.dest])
132
133 def update(self):
134 '''Update existing svn working directory.'''
135 self._exec(["update", "-r", self.revision, self.dest])
136
137 def revert(self):
138 '''Revert svn working directory.'''
139 self._exec(["revert", "-R", self.dest])
140
141 def get_revision(self):
142 '''Revision and URL of subversion working directory.'''
143 text = '\n'.join(self._exec(["info", self.dest]))
144 rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)
145 url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0)
146 return rev, url
147
148 def has_local_mods(self):
149 '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
150 lines = self._exec(["status", self.dest])
151 # Match only revisioned files, i.e. ignore status '?'.
152 regex = re.compile(r'^[^?]')
153 # Has local mods if more than 0 modifed revisioned files.
154 return len(filter(regex.match, lines)) > 0
155
156 def needs_update(self):
157 curr, url = self.get_revision()
158 out2 = '\n'.join(self._exec(["info", "-r", "HEAD", self.dest]))
159 head = re.search(r'^Revision:.*$', out2, re.MULTILINE).group(0)
160 rev1 = int(curr.split(':')[1].strip())
161 rev2 = int(head.split(':')[1].strip())
162 change = False
163 if rev1 < rev2:
164 change = True
165 return change, curr, head
166
167
168 # ===========================================
169
170 def main():
171 module = AnsibleModule(
172 argument_spec=dict(
173 dest=dict(required=True),
174 repo=dict(required=True, aliases=['name', 'repository']),
175 revision=dict(default='HEAD', aliases=['rev', 'version']),
176 force=dict(default='yes', type='bool'),
177 username=dict(required=False),
178 password=dict(required=False),
179 executable=dict(default=None),
180 export=dict(default=False, required=False, type='bool'),
181 ),
182 supports_check_mode=True
183 )
184
185 dest = os.path.expanduser(module.params['dest'])
186 repo = module.params['repo']
187 revision = module.params['revision']
188 force = module.params['force']
189 username = module.params['username']
190 password = module.params['password']
191 svn_path = module.params['executable'] or module.get_bin_path('svn', True)
192 export = module.params['export']
193
194 os.environ['LANG'] = 'C'
195 svn = Subversion(module, dest, repo, revision, username, password, svn_path)
196
197 if not os.path.exists(dest):
198 before = None
199 local_mods = False
200 if module.check_mode:
201 module.exit_json(changed=True)
202 if not export:
203 svn.checkout()
204 else:
205 svn.export()
206 elif os.path.exists("%s/.svn" % (dest, )):
207 # Order matters. Need to get local mods before switch to avoid false
208 # positives. Need to switch before revert to ensure we are reverting to
209 # correct repo.
210 if module.check_mode:
211 check, before, after = svn.needs_update()
212 module.exit_json(changed=check, before=before, after=after)
213 before = svn.get_revision()
214 local_mods = svn.has_local_mods()
215 svn.switch()
216 if local_mods:
217 if force:
218 svn.revert()
219 else:
220 module.fail_json(msg="ERROR: modified files exist in the repository.")
221 svn.update()
222 else:
223 module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest, ))
224
225 after = svn.get_revision()
226 changed = before != after or local_mods
227 module.exit_json(changed=changed, before=before, after=after)
228
229 # import module snippets
230 from ansible.module_utils.basic import *
231 main()
232
[end of source_control/subversion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/source_control/subversion.py b/source_control/subversion.py
--- a/source_control/subversion.py
+++ b/source_control/subversion.py
@@ -123,7 +123,12 @@
def export(self, force=False):
'''Export svn repo to directory'''
- self._exec(["export", "-r", self.revision, self.repo, self.dest])
+ cmd = ["export"]
+ if force:
+ cmd.append("--force")
+ cmd.extend(["-r", self.revision, self.repo, self.dest])
+
+ self._exec(cmd)
def switch(self):
'''Change working directory's repo.'''
@@ -173,7 +178,7 @@
dest=dict(required=True),
repo=dict(required=True, aliases=['name', 'repository']),
revision=dict(default='HEAD', aliases=['rev', 'version']),
- force=dict(default='yes', type='bool'),
+ force=dict(default='no', type='bool'),
username=dict(required=False),
password=dict(required=False),
executable=dict(default=None),
@@ -194,7 +199,7 @@
os.environ['LANG'] = 'C'
svn = Subversion(module, dest, repo, revision, username, password, svn_path)
- if not os.path.exists(dest):
+ if export or not os.path.exists(dest):
before = None
local_mods = False
if module.check_mode:
@@ -202,7 +207,7 @@
if not export:
svn.checkout()
else:
- svn.export()
+ svn.export(force=force)
elif os.path.exists("%s/.svn" % (dest, )):
# Order matters. Need to get local mods before switch to avoid false
# positives. Need to switch before revert to ensure we are reverting to
@@ -222,9 +227,12 @@
else:
module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest, ))
- after = svn.get_revision()
- changed = before != after or local_mods
- module.exit_json(changed=changed, before=before, after=after)
+ if export:
+ module.exit_json(changed=True)
+ else:
+ after = svn.get_revision()
+ changed = before != after or local_mods
+ module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
| {"golden_diff": "diff --git a/source_control/subversion.py b/source_control/subversion.py\n--- a/source_control/subversion.py\n+++ b/source_control/subversion.py\n@@ -123,7 +123,12 @@\n \t\t\n def export(self, force=False):\n '''Export svn repo to directory'''\n- self._exec([\"export\", \"-r\", self.revision, self.repo, self.dest])\n+ cmd = [\"export\"]\n+ if force:\n+ cmd.append(\"--force\")\n+ cmd.extend([\"-r\", self.revision, self.repo, self.dest])\n+\n+ self._exec(cmd)\n \n def switch(self):\n '''Change working directory's repo.'''\n@@ -173,7 +178,7 @@\n dest=dict(required=True),\n repo=dict(required=True, aliases=['name', 'repository']),\n revision=dict(default='HEAD', aliases=['rev', 'version']),\n- force=dict(default='yes', type='bool'),\n+ force=dict(default='no', type='bool'),\n username=dict(required=False),\n password=dict(required=False),\n executable=dict(default=None),\n@@ -194,7 +199,7 @@\n os.environ['LANG'] = 'C'\n svn = Subversion(module, dest, repo, revision, username, password, svn_path)\n \n- if not os.path.exists(dest):\n+ if export or not os.path.exists(dest):\n before = None\n local_mods = False\n if module.check_mode:\n@@ -202,7 +207,7 @@\n if not export:\n svn.checkout()\n else:\n- svn.export()\n+ svn.export(force=force)\n elif os.path.exists(\"%s/.svn\" % (dest, )):\n # Order matters. Need to get local mods before switch to avoid false\n # positives. Need to switch before revert to ensure we are reverting to\n@@ -222,9 +227,12 @@\n else:\n module.fail_json(msg=\"ERROR: %s folder already exists, but its not a subversion repository.\" % (dest, ))\n \n- after = svn.get_revision()\n- changed = before != after or local_mods\n- module.exit_json(changed=changed, before=before, after=after)\n+ if export:\n+ module.exit_json(changed=True)\n+ else:\n+ after = svn.get_revision()\n+ changed = before != after or local_mods\n+ module.exit_json(changed=changed, before=before, after=after)\n \n # import module snippets\n from ansible.module_utils.basic import *\n", "issue": "subversion: export fails if dest directory exists\nUsing the following syntax, the task always fails if /tmp/foo already exists.\n\n`- name: (testing) svn export of desired callflow.xml\n subversion: repo=http://repo.local/repos/target/ dest=/tmp/foo/ export=yes`\n\nThe error shown is:\n`msg: ERROR: /tmp/foo/ folder already exists, but its not a subversion repository.`\n\nThis appears to be happening because the subversion module does not account for exporting if the target directory exists (but is not a working copy):\n\n`if not os.path.exists(dest):`\n...export and checkout both handled\n`elif os.path.exists(\"%s/.svn\" % (dest, )):`\n...checkout handled. 
export presumably not handled because this is a working directory\n`else:\n module.fail_json(msg=\"ERROR: %s folder already exists, but its not a subversion repository.\" % (dest, ))`\n\nI think if we insert another case before the `else` of `elif os.path.exists(dest) and export:` and handle export to a preexisting directory there, it should work.\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: subversion\nshort_description: Deploys a subversion repository.\ndescription:\n - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout.\nversion_added: \"0.7\"\nauthor: Dane Summers, [email protected]\nnotes:\n - Requires I(svn) to be installed on the client.\nrequirements: []\noptions:\n repo:\n description:\n - The subversion URL to the repository.\n required: true\n aliases: [ name, repository ]\n default: null\n dest:\n description:\n - Absolute path where the repository should be deployed.\n required: true\n default: null\n revision:\n description:\n - Specific revision to checkout.\n required: false\n default: HEAD\n aliases: [ version ]\n force:\n description:\n - If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files.\n required: false\n default: \"yes\"\n choices: [ \"yes\", \"no\" ]\n username:\n description:\n - --username parameter passed to svn.\n required: false\n default: null\n password:\n description:\n - --password parameter passed to svn.\n required: false\n default: null\n executable:\n required: false\n default: null\n version_added: \"1.4\"\n description:\n - Path to svn executable to use. 
If not supplied,\n the normal mechanism for resolving binary paths will be used.\n export:\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n version_added: \"1.6\"\n description:\n - If C(yes), do export instead of checkout/update.\n'''\n\nEXAMPLES = '''\n# Checkout subversion repository to specified folder.\n- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout\n\n# Export subversion directory to folder\n- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True\n'''\n\nimport re\nimport tempfile\n\n\nclass Subversion(object):\n def __init__(\n self, module, dest, repo, revision, username, password, svn_path):\n self.module = module\n self.dest = dest\n self.repo = repo\n self.revision = revision\n self.username = username\n self.password = password\n self.svn_path = svn_path\n\n def _exec(self, args):\n bits = [\n self.svn_path,\n '--non-interactive',\n '--trust-server-cert',\n '--no-auth-cache',\n ]\n if self.username:\n bits.extend([\"--username\", self.username])\n if self.password:\n bits.extend([\"--password\", self.password])\n bits.extend(args)\n rc, out, err = self.module.run_command(bits, check_rc=True)\n return out.splitlines()\n\n def checkout(self):\n '''Creates new svn working directory if it does not already exist.'''\n self._exec([\"checkout\", \"-r\", self.revision, self.repo, self.dest])\n\t\t\n def export(self, force=False):\n '''Export svn repo to directory'''\n self._exec([\"export\", \"-r\", self.revision, self.repo, self.dest])\n\n def switch(self):\n '''Change working directory's repo.'''\n # switch to ensure we are pointing at correct repo.\n self._exec([\"switch\", self.repo, self.dest])\n\n def update(self):\n '''Update existing svn working directory.'''\n self._exec([\"update\", \"-r\", self.revision, self.dest])\n\n def revert(self):\n '''Revert svn working directory.'''\n self._exec([\"revert\", \"-R\", self.dest])\n\n def get_revision(self):\n '''Revision and URL of subversion working directory.'''\n text = '\\n'.join(self._exec([\"info\", self.dest]))\n rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)\n url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0)\n return rev, url\n\n def has_local_mods(self):\n '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''\n lines = self._exec([\"status\", self.dest])\n # Match only revisioned files, i.e. 
ignore status '?'.\n regex = re.compile(r'^[^?]')\n # Has local mods if more than 0 modifed revisioned files.\n return len(filter(regex.match, lines)) > 0\n\n def needs_update(self):\n curr, url = self.get_revision()\n out2 = '\\n'.join(self._exec([\"info\", \"-r\", \"HEAD\", self.dest]))\n head = re.search(r'^Revision:.*$', out2, re.MULTILINE).group(0)\n rev1 = int(curr.split(':')[1].strip())\n rev2 = int(head.split(':')[1].strip())\n change = False\n if rev1 < rev2:\n change = True\n return change, curr, head\n\n\n# ===========================================\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n dest=dict(required=True),\n repo=dict(required=True, aliases=['name', 'repository']),\n revision=dict(default='HEAD', aliases=['rev', 'version']),\n force=dict(default='yes', type='bool'),\n username=dict(required=False),\n password=dict(required=False),\n executable=dict(default=None),\n export=dict(default=False, required=False, type='bool'),\n ),\n supports_check_mode=True\n )\n\n dest = os.path.expanduser(module.params['dest'])\n repo = module.params['repo']\n revision = module.params['revision']\n force = module.params['force']\n username = module.params['username']\n password = module.params['password']\n svn_path = module.params['executable'] or module.get_bin_path('svn', True)\n export = module.params['export']\n\n os.environ['LANG'] = 'C'\n svn = Subversion(module, dest, repo, revision, username, password, svn_path)\n\n if not os.path.exists(dest):\n before = None\n local_mods = False\n if module.check_mode:\n module.exit_json(changed=True)\n if not export:\n svn.checkout()\n else:\n svn.export()\n elif os.path.exists(\"%s/.svn\" % (dest, )):\n # Order matters. Need to get local mods before switch to avoid false\n # positives. Need to switch before revert to ensure we are reverting to\n # correct repo.\n if module.check_mode:\n check, before, after = svn.needs_update()\n module.exit_json(changed=check, before=before, after=after)\n before = svn.get_revision()\n local_mods = svn.has_local_mods()\n svn.switch()\n if local_mods:\n if force:\n svn.revert()\n else:\n module.fail_json(msg=\"ERROR: modified files exist in the repository.\")\n svn.update()\n else:\n module.fail_json(msg=\"ERROR: %s folder already exists, but its not a subversion repository.\" % (dest, ))\n\n after = svn.get_revision()\n changed = before != after or local_mods\n module.exit_json(changed=changed, before=before, after=after)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "source_control/subversion.py"}]} | 3,142 | 558 |
gh_patches_debug_31442 | rasdani/github-patches | git_diff | Flexget__Flexget-1662 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't send email notification with full statistics including rejected and undecided entries (not only accepted/failed ones)
### Expected behaviour:
notification with accepted|rejected|undecided|failed entries is sent
### Actual behaviour:
notification with only accepted and failed entries is sent
### Steps to reproduce:
- Step 1: add to tasks "digest" plugin with "accepted|rejected|undecided|failed" options
- Step 2: add "emailreport" task with "notify" and "from_digest" plugins and "restore_state: yes" option
- Step 3: execute tasks from step 1
#### Config example:
```
schedules:
- tasks: 'task1'
interval:
hours: 2
- tasks: 'task2'
interval:
hours: 4
- tasks: 'emailreport'
schedule:
hour: 21
...
templates:
...
video:
digest:
list: email_list
state:
- accepted
- rejected
- undecided
- failed
...
tasks:
task1:
template:
- video
...
task2:
template:
- video
...
emailreport:
no_entries_ok: yes
from_digest:
list: email_list
restore_state: yes
seen: local
notify:
task:
title: "[FLEXGET] Daily report"
template: custom_html.template
via:
- email:
from: '{? email.email_from ?}'
to:
- '{? email.email_to1 ?}'
- '{? email.email_to2 ?}'
smtp_host: smtp.gmail.com
smtp_port: 587
smtp_username: '{? email.smtp_username ?}'
smtp_password: '{? email.smtp_password ?}'
smtp_tls: yes
html: yes
```
#### Log:
```
2017-01-26 21:00 VERBOSE details emailreport Produced 586 entries.
2017-01-26 21:00 VERBOSE manipulate emailreport Modified 0 entries.
2017-01-26 21:00 VERBOSE details emailreport Summary - Accepted: 0 (Rejected: 1070 Undecided: 586 Failed: 0)
2017-01-26 21:00 WARNING task emailreport Task doesn't have any output plugins, you should add (at least) one!
2017-01-26 21:00 VERBOSE notify_entry emailreport No accepted or failed entries, not sending a notification.
```
### Additional information:
- Flexget Version: 2.9.8
- Python Version: 2.7.12
- Installation method: pip
- OS and version: Ubuntu 16.04
P.S.
As @liiight proposed maybe "always_send" should be added to the "notify" plugin
Related issue and FR in forum:
https://discuss.flexget.com/t/cant-send-email-notification-with-full-statistics-including-rejected-and-undecided-entries-not-only-accepted-failed-ones/3129
https://discuss.flexget.com/t/add-always-send-option-to-notify-plugin/3130?source_topic_id=3129
</issue>
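Editor's note: the P.S. above floats an "always_send" switch for the notify plugin's task section. The sketch below only illustrates that suggestion and is not Flexget's shipped code; the option name, its default, and the guard logic are assumptions taken from the report's wording.

```python
# Sketch of the suggested 'always_send' behaviour (assumed names, not Flexget's API).

# Extra key that would sit alongside 'title', 'template' and 'via' in the task schema:
TASK_SCHEMA_EXTRA = {'always_send': {'type': 'boolean', 'default': False}}

def should_send_task_notification(accepted, failed, always_send=False):
    """Decide whether the per-task digest notification goes out.

    Today the plugin skips the notification when nothing was accepted or failed;
    with always_send=True the report would be sent even if the run only produced
    rejected or undecided entries, which is the reporter's emailreport case.
    """
    return bool(accepted) or bool(failed) or always_send

# The run from the log above: 0 accepted, 0 failed, 1070 rejected, 586 undecided.
assert not should_send_task_notification(accepted=[], failed=[])
assert should_send_task_notification(accepted=[], failed=[], always_send=True)
```

In config terms the emailreport task would then set `always_send: yes` under `notify.task` to force the daily digest out regardless of the accept/fail counts (again an assumed knob, shown only to make the request concrete).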
<code>
[start of flexget/plugins/notifiers/notify.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5
6 import itertools
7 from flexget import plugin
8 from flexget.config_schema import one_or_more
9 from flexget.event import event
10 from flexget.utils.template import get_template
11
12 log = logging.getLogger('notify_entry')
13
14 ENTRY_CONTAINERS = ['entries', 'accepted', 'rejected', 'failed', 'undecided']
15
16 VIA_SCHEMA = {
17 'type': 'array',
18 'items': {
19 'allOf': [
20 {'$ref': '/schema/plugins?interface=notifiers'},
21 {
22 'maxProperties': 1,
23 'error_maxProperties': 'Plugin options indented 2 more spaces than '
24 'the first letter of the plugin name.',
25 'minProperties': 1
26 }
27 ]
28 }
29 }
30
31
32 class Notify(object):
33 schema = {
34 'type': 'object',
35 'properties': {
36 'entries': {
37 'type': 'object',
38 'properties': {
39 'title': {'type': 'string', 'default': '{{ title }}'},
40 'message': {
41 'type': 'string',
42 'default': '{% if series_name is defined %}'
43 '{{ tvdb_series_name|d(series_name) }} '
44 '{{series_id}} {{tvdb_ep_name|d('')}}'
45 '{% elif imdb_name is defined %}'
46 '{{imdb_name}} {{imdb_year}}'
47 '{% elif title is defined %}'
48 '{{ title }}'
49 '{% endif %}'
50 },
51 'template': {'type': 'string'},
52 'what': one_or_more({'type': 'string', 'enum': ENTRY_CONTAINERS}),
53 'via': VIA_SCHEMA
54 },
55 'required': ['via'],
56 'additionalProperties': False
57 },
58 'task': {
59 'type': 'object',
60 'properties': {
61 'title': {
62 'type': 'string',
63 'default': '[FlexGet] {{task.name}}:'
64 '{%if task.failed %} {{task.failed|length}} failed entries.{% endif %}'
65 '{% if task.accepted %} {{task.accepted|length}} new entries downloaded.{% endif %}'},
66 'template': {'type': 'string', 'default': 'default.template'},
67 'via': VIA_SCHEMA
68 },
69 'required': ['via'],
70 'additionalProperties': False
71 },
72 'abort': {
73 'type': 'object',
74 'properties': {
75 'title': {'type': 'string', 'default': 'Task {{ task.name }} has aborted!'},
76 'message': {'type': 'string', 'default': 'Reason: {{ task.abort_reason }}'},
77 'via': VIA_SCHEMA
78 },
79 'required': ['via']
80 }
81 },
82 'additionalProperties': False,
83 'minProperties': 1,
84 'error_minProperties': 'You must specify at least one of `entries` or `task` in your notify config.'
85 }
86
87 def prepare_config(self, config):
88 if 'entries' in config:
89 config['entries'].setdefault('what', ['accepted'])
90 if not isinstance(config['entries']['what'], list):
91 config['entries']['what'] = [config['entries']['what']]
92 return config
93
94 def send_notification(self, *args, **kwargs):
95 send_notification = plugin.get_plugin_by_name('notification_framework').instance.send_notification
96 try:
97 send_notification(*args, **kwargs)
98 except plugin.PluginError as e:
99 log.error(e)
100 except plugin.PluginWarning as e:
101 log.warning(e)
102 except Exception as e:
103 log.exception(e)
104
105 @plugin.priority(0)
106 def on_task_output(self, task, config):
107 config = self.prepare_config(config)
108 if 'entries' in config:
109 entries = list(itertools.chain(*(getattr(task, what) for what in config['entries']['what'])))
110 if not entries:
111 log.debug('No entries to notify about.')
112 return
113 # If a file template is defined, it overrides message
114 if config['entries'].get('template'):
115 try:
116 message = get_template(config['entries']['template'], scope='entry')
117 except ValueError:
118 raise plugin.PluginError('Cannot locate template on disk: %s' % config['entries']['template'])
119 else:
120 message = config['entries']['message']
121 for entry in entries:
122 self.send_notification(config['entries']['title'], message, config['entries']['via'],
123 template_renderer=entry.render)
124 if 'task' in config:
125 if not (task.accepted or task.failed):
126 log.verbose('No accepted or failed entries, not sending a notification.')
127 return
128 try:
129 template = get_template(config['task']['template'], scope='task')
130 except ValueError:
131 raise plugin.PluginError('Cannot locate template on disk: %s' % config['task']['template'])
132 self.send_notification(config['task']['title'], template, config['task']['via'],
133 template_renderer=task.render)
134
135 def on_task_abort(self, task, config):
136 if 'abort' in config:
137 if task.silent_abort:
138 return
139 log.debug('sending abort notification')
140 self.send_notification(config['abort']['title'], config['abort']['message'], config['abort']['via'],
141 template_renderer=task.render)
142
143
144 @event('plugin.register')
145 def register_plugin():
146 plugin.register(Notify, 'notify', api_ver=2)
147
[end of flexget/plugins/notifiers/notify.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/notifiers/notify.py b/flexget/plugins/notifiers/notify.py
--- a/flexget/plugins/notifiers/notify.py
+++ b/flexget/plugins/notifiers/notify.py
@@ -60,10 +60,15 @@
'properties': {
'title': {
'type': 'string',
- 'default': '[FlexGet] {{task.name}}:'
- '{%if task.failed %} {{task.failed|length}} failed entries.{% endif %}'
- '{% if task.accepted %} {{task.accepted|length}} new entries downloaded.{% endif %}'},
+ 'default': '{% if not task.failed or not task.accepted %} Task {{task.name}} did not'
+ ' produce any entries.'
+ '{% else %} [FlexGet] {{task.name}}:'
+ '{% if task.failed %} {{task.failed|length}} failed entries.{% endif %}'
+ '{% if task.accepted %} {{task.accepted|length}} new entries downloaded.{% endif %}'
+ '{% endif %}'
+ },
'template': {'type': 'string', 'default': 'default.template'},
+ 'always_send': {'type': 'boolean', 'default': False},
'via': VIA_SCHEMA
},
'required': ['via'],
@@ -122,7 +127,7 @@
self.send_notification(config['entries']['title'], message, config['entries']['via'],
template_renderer=entry.render)
if 'task' in config:
- if not (task.accepted or task.failed):
+ if not (task.accepted or task.failed) and not config['task']['always_send']:
log.verbose('No accepted or failed entries, not sending a notification.')
return
try:
| {"golden_diff": "diff --git a/flexget/plugins/notifiers/notify.py b/flexget/plugins/notifiers/notify.py\n--- a/flexget/plugins/notifiers/notify.py\n+++ b/flexget/plugins/notifiers/notify.py\n@@ -60,10 +60,15 @@\n 'properties': {\n 'title': {\n 'type': 'string',\n- 'default': '[FlexGet] {{task.name}}:'\n- '{%if task.failed %} {{task.failed|length}} failed entries.{% endif %}'\n- '{% if task.accepted %} {{task.accepted|length}} new entries downloaded.{% endif %}'},\n+ 'default': '{% if not task.failed or not task.accepted %} Task {{task.name}} did not'\n+ ' produce any entries.'\n+ '{% else %} [FlexGet] {{task.name}}:'\n+ '{% if task.failed %} {{task.failed|length}} failed entries.{% endif %}'\n+ '{% if task.accepted %} {{task.accepted|length}} new entries downloaded.{% endif %}'\n+ '{% endif %}'\n+ },\n 'template': {'type': 'string', 'default': 'default.template'},\n+ 'always_send': {'type': 'boolean', 'default': False},\n 'via': VIA_SCHEMA\n },\n 'required': ['via'],\n@@ -122,7 +127,7 @@\n self.send_notification(config['entries']['title'], message, config['entries']['via'],\n template_renderer=entry.render)\n if 'task' in config:\n- if not (task.accepted or task.failed):\n+ if not (task.accepted or task.failed) and not config['task']['always_send']:\n log.verbose('No accepted or failed entries, not sending a notification.')\n return\n try:\n", "issue": "Can\u2019t send email notification with full statistics including rejected and undecided entries (not only accepted/failed ones)\n### Expected behaviour:\r\nnotification with accepted|rejected|undecided|failed entries is sent\r\n\r\n### Actual behaviour:\r\nnotification with only accepted and failed entries is sent\r\n\r\n### Steps to reproduce:\r\n- Step 1: add to tasks \"digest\" plugin with \"accepted|rejected|undecided|failed\" options\r\n- Step 2: add \"emailreport\" task with \"notify\" and \"from_digest\" plugins and \"restore_state: yes\" option\r\n- Step 3: execute tasks from step 1\r\n\r\n#### Config example:\r\n```\r\nschedules:\r\n - tasks: 'task1'\r\n interval:\r\n hours: 2\r\n - tasks: 'task2'\r\n interval:\r\n hours: 4\r\n - tasks: 'emailreport'\r\n schedule:\r\n hour: 21\r\n...\r\ntemplates:\r\n ...\r\n video:\r\n digest:\r\n list: email_list\r\n state:\r\n - accepted\r\n - rejected\r\n - undecided\r\n - failed\r\n...\r\ntasks:\r\n task1:\r\n template:\r\n - video\r\n...\r\n task2:\r\n template:\r\n - video\r\n...\r\n emailreport:\r\n no_entries_ok: yes\r\n from_digest:\r\n list: email_list\r\n restore_state: yes\r\n seen: local\r\n\r\n notify:\r\n task:\r\n title: \"[FLEXGET] Daily report\"\r\n template: custom_html.template\r\n via:\r\n - email:\r\n from: '{? email.email_from ?}'\r\n to:\r\n - '{? email.email_to1 ?}'\r\n - '{? email.email_to2 ?}'\r\n smtp_host: smtp.gmail.com\r\n smtp_port: 587\r\n smtp_username: '{? email.smtp_username ?}'\r\n smtp_password: '{? 
email.smtp_password ?}'\r\n smtp_tls: yes\r\n html: yes\r\n```\r\n \r\n#### Log:\r\n```\r\n2017-01-26 21:00 VERBOSE details emailreport Produced 586 entries.\r\n2017-01-26 21:00 VERBOSE manipulate emailreport Modified 0 entries.\r\n2017-01-26 21:00 VERBOSE details emailreport Summary - Accepted: 0 (Rejected: 1070 Undecided: 586 Failed: 0)\r\n2017-01-26 21:00 WARNING task emailreport Task doesn't have any output plugins, you should add (at least) one!\r\n2017-01-26 21:00 VERBOSE notify_entry emailreport No accepted or failed entries, not sending a notification.\r\n```\r\n\r\n### Additional information:\r\n\r\n- Flexget Version: 2.9.8\r\n- Python Version: 2.7.12\r\n- Installation method: pip\r\n- OS and version: Ubuntu 16.04\r\n\r\nP.S.\r\nAs @liiight proposed maybe \"always_send\" should be added to the \"notify\" plugin\r\n\r\nRelated issue and FR in forum:\r\nhttps://discuss.flexget.com/t/cant-send-email-notification-with-full-statistics-including-rejected-and-undecided-entries-not-only-accepted-failed-ones/3129\r\nhttps://discuss.flexget.com/t/add-always-send-option-to-notify-plugin/3130?source_topic_id=3129\r\n\r\n\nCan\u2019t send email notification with full statistics including rejected and undecided entries (not only accepted/failed ones)\n### Expected behaviour:\r\nnotification with accepted|rejected|undecided|failed entries is sent\r\n\r\n### Actual behaviour:\r\nnotification with only accepted and failed entries is sent\r\n\r\n### Steps to reproduce:\r\n- Step 1: add to tasks \"digest\" plugin with \"accepted|rejected|undecided|failed\" options\r\n- Step 2: add \"emailreport\" task with \"notify\" and \"from_digest\" plugins and \"restore_state: yes\" option\r\n- Step 3: execute tasks from step 1\r\n\r\n#### Config example:\r\n```\r\nschedules:\r\n - tasks: 'task1'\r\n interval:\r\n hours: 2\r\n - tasks: 'task2'\r\n interval:\r\n hours: 4\r\n - tasks: 'emailreport'\r\n schedule:\r\n hour: 21\r\n...\r\ntemplates:\r\n ...\r\n video:\r\n digest:\r\n list: email_list\r\n state:\r\n - accepted\r\n - rejected\r\n - undecided\r\n - failed\r\n...\r\ntasks:\r\n task1:\r\n template:\r\n - video\r\n...\r\n task2:\r\n template:\r\n - video\r\n...\r\n emailreport:\r\n no_entries_ok: yes\r\n from_digest:\r\n list: email_list\r\n restore_state: yes\r\n seen: local\r\n\r\n notify:\r\n task:\r\n title: \"[FLEXGET] Daily report\"\r\n template: custom_html.template\r\n via:\r\n - email:\r\n from: '{? email.email_from ?}'\r\n to:\r\n - '{? email.email_to1 ?}'\r\n - '{? email.email_to2 ?}'\r\n smtp_host: smtp.gmail.com\r\n smtp_port: 587\r\n smtp_username: '{? email.smtp_username ?}'\r\n smtp_password: '{? 
email.smtp_password ?}'\r\n smtp_tls: yes\r\n html: yes\r\n```\r\n \r\n#### Log:\r\n```\r\n2017-01-26 21:00 VERBOSE details emailreport Produced 586 entries.\r\n2017-01-26 21:00 VERBOSE manipulate emailreport Modified 0 entries.\r\n2017-01-26 21:00 VERBOSE details emailreport Summary - Accepted: 0 (Rejected: 1070 Undecided: 586 Failed: 0)\r\n2017-01-26 21:00 WARNING task emailreport Task doesn't have any output plugins, you should add (at least) one!\r\n2017-01-26 21:00 VERBOSE notify_entry emailreport No accepted or failed entries, not sending a notification.\r\n```\r\n\r\n### Additional information:\r\n\r\n- Flexget Version: 2.9.8\r\n- Python Version: 2.7.12\r\n- Installation method: pip\r\n- OS and version: Ubuntu 16.04\r\n\r\nP.S.\r\nAs @liiight proposed maybe \"always_send\" should be added to the \"notify\" plugin\r\n\r\nRelated issue and FR in forum:\r\nhttps://discuss.flexget.com/t/cant-send-email-notification-with-full-statistics-including-rejected-and-undecided-entries-not-only-accepted-failed-ones/3129\r\nhttps://discuss.flexget.com/t/add-always-send-option-to-notify-plugin/3130?source_topic_id=3129\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nimport itertools\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\nfrom flexget.utils.template import get_template\n\nlog = logging.getLogger('notify_entry')\n\nENTRY_CONTAINERS = ['entries', 'accepted', 'rejected', 'failed', 'undecided']\n\nVIA_SCHEMA = {\n 'type': 'array',\n 'items': {\n 'allOf': [\n {'$ref': '/schema/plugins?interface=notifiers'},\n {\n 'maxProperties': 1,\n 'error_maxProperties': 'Plugin options indented 2 more spaces than '\n 'the first letter of the plugin name.',\n 'minProperties': 1\n }\n ]\n }\n}\n\n\nclass Notify(object):\n schema = {\n 'type': 'object',\n 'properties': {\n 'entries': {\n 'type': 'object',\n 'properties': {\n 'title': {'type': 'string', 'default': '{{ title }}'},\n 'message': {\n 'type': 'string',\n 'default': '{% if series_name is defined %}'\n '{{ tvdb_series_name|d(series_name) }} '\n '{{series_id}} {{tvdb_ep_name|d('')}}'\n '{% elif imdb_name is defined %}'\n '{{imdb_name}} {{imdb_year}}'\n '{% elif title is defined %}'\n '{{ title }}'\n '{% endif %}'\n },\n 'template': {'type': 'string'},\n 'what': one_or_more({'type': 'string', 'enum': ENTRY_CONTAINERS}),\n 'via': VIA_SCHEMA\n },\n 'required': ['via'],\n 'additionalProperties': False\n },\n 'task': {\n 'type': 'object',\n 'properties': {\n 'title': {\n 'type': 'string',\n 'default': '[FlexGet] {{task.name}}:'\n '{%if task.failed %} {{task.failed|length}} failed entries.{% endif %}'\n '{% if task.accepted %} {{task.accepted|length}} new entries downloaded.{% endif %}'},\n 'template': {'type': 'string', 'default': 'default.template'},\n 'via': VIA_SCHEMA\n },\n 'required': ['via'],\n 'additionalProperties': False\n },\n 'abort': {\n 'type': 'object',\n 'properties': {\n 'title': {'type': 'string', 'default': 'Task {{ task.name }} has aborted!'},\n 'message': {'type': 'string', 'default': 'Reason: {{ task.abort_reason }}'},\n 'via': VIA_SCHEMA\n },\n 'required': ['via']\n }\n },\n 'additionalProperties': False,\n 'minProperties': 1,\n 'error_minProperties': 'You must specify at least one of `entries` or `task` in your notify config.'\n }\n\n def prepare_config(self, config):\n if 'entries' in config:\n 
config['entries'].setdefault('what', ['accepted'])\n if not isinstance(config['entries']['what'], list):\n config['entries']['what'] = [config['entries']['what']]\n return config\n\n def send_notification(self, *args, **kwargs):\n send_notification = plugin.get_plugin_by_name('notification_framework').instance.send_notification\n try:\n send_notification(*args, **kwargs)\n except plugin.PluginError as e:\n log.error(e)\n except plugin.PluginWarning as e:\n log.warning(e)\n except Exception as e:\n log.exception(e)\n\n @plugin.priority(0)\n def on_task_output(self, task, config):\n config = self.prepare_config(config)\n if 'entries' in config:\n entries = list(itertools.chain(*(getattr(task, what) for what in config['entries']['what'])))\n if not entries:\n log.debug('No entries to notify about.')\n return\n # If a file template is defined, it overrides message\n if config['entries'].get('template'):\n try:\n message = get_template(config['entries']['template'], scope='entry')\n except ValueError:\n raise plugin.PluginError('Cannot locate template on disk: %s' % config['entries']['template'])\n else:\n message = config['entries']['message']\n for entry in entries:\n self.send_notification(config['entries']['title'], message, config['entries']['via'],\n template_renderer=entry.render)\n if 'task' in config:\n if not (task.accepted or task.failed):\n log.verbose('No accepted or failed entries, not sending a notification.')\n return\n try:\n template = get_template(config['task']['template'], scope='task')\n except ValueError:\n raise plugin.PluginError('Cannot locate template on disk: %s' % config['task']['template'])\n self.send_notification(config['task']['title'], template, config['task']['via'],\n template_renderer=task.render)\n\n def on_task_abort(self, task, config):\n if 'abort' in config:\n if task.silent_abort:\n return\n log.debug('sending abort notification')\n self.send_notification(config['abort']['title'], config['abort']['message'], config['abort']['via'],\n template_renderer=task.render)\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(Notify, 'notify', api_ver=2)\n", "path": "flexget/plugins/notifiers/notify.py"}]} | 3,545 | 398 |
gh_patches_debug_18519 | rasdani/github-patches | git_diff | quantumlib__Cirq-3527 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prevent installing both cirq and cirq-unstable.
**Is your feature request related to a use case or problem? Please describe.**
If `cirq-unstable` is installed in a virtualenv and you `pip install cirq`, the virtualenv gets into a bad state. This is because they are different packages with separate metadata, but they put the python code in the same location in a `cirq` folder in site-packages. So pip thinks both are installed but in fact only the most-recently installed package is "active".
If you try to fix such a situation by uninstalling one package, the code gets removed but pip still thinks the other package is installed. The other package is now in a broken state (can't be imported, etc.). The only way to recover is to uninstall _both_ packages and then install one of them again.
**Describe the solution you'd like**
I'd like `pip install cirq` to fail if `cirq-unstable` is installed, and vice-versa. I did a quick glance at setuptools docs to see if there is some way to specify packages that _must not_ be present, but I couldn't see anything. It also doesn't seem to work to add a package requirement like `cirq != *`. I'm not sure if there's any other mechanism we can use to accomplish this.
**What is the urgency from your perspective for this issue? Is it blocking important work?**
P3 - I'm not really blocked by it, it is an idea I'd like to discuss / suggestion based on principle
We have internal workarounds for this to try to keep people's virtualenvs in a good state if they accidentally install both versions, but it'd be great if we could prevent the problem from happening at all.
</issue>
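Editor's note: setuptools has no "conflicts with" field, so the closest approximation to the behaviour requested above is a build-time probe of installed distribution metadata. The snippet below is a sketch of that idea and is not part of Cirq's setup.py; the package names come from the issue, and everything else is an assumption.

```python
# Illustrative guard only: stop an sdist install when the sibling package that
# unpacks into the same site-packages/cirq directory is already present.
import sys

try:
    from importlib.metadata import PackageNotFoundError, distribution  # Python 3.8+
except ImportError:
    from importlib_metadata import PackageNotFoundError, distribution  # backport package

THIS_PACKAGE = 'cirq'  # would be 'cirq-unstable' when building that flavour
CONFLICTING_PACKAGE = 'cirq-unstable' if THIS_PACKAGE == 'cirq' else 'cirq'

try:
    distribution(CONFLICTING_PACKAGE)
except PackageNotFoundError:
    pass  # the other flavour is not installed, so proceeding is safe
else:
    sys.exit(
        'Refusing to install {0}: {1} is already installed and both packages write '
        'into the same cirq/ directory. Run "pip uninstall {1}" first.'.format(
            THIS_PACKAGE, CONFLICTING_PACKAGE)
    )
```

A check like this only runs for source builds; pip does not execute setup.py when installing a pre-built wheel, which is part of why there is no clean setuptools-only answer here.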
<code>
[start of setup.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17 from setuptools import find_packages, setup
18
19 # This reads the __version__ variable from cirq/_version.py
20 __version__ = ''
21 exec(open('cirq/_version.py').read())
22
23 name = 'cirq'
24
25 description = (
26 'A framework for creating, editing, and invoking '
27 'Noisy Intermediate Scale Quantum (NISQ) circuits.'
28 )
29
30 # README file as long_description.
31 long_description = io.open('README.rst', encoding='utf-8').read()
32
33 # If CIRQ_UNSTABLE_VERSION is set then we use cirq-unstable as the name of the package
34 # and update the version to this value.
35 if 'CIRQ_UNSTABLE_VERSION' in os.environ:
36 name = 'cirq-unstable'
37 __version__ = os.environ['CIRQ_UNSTABLE_VERSION']
38 long_description = (
39 "**This is a development version of Cirq and may be "
40 "unstable.**\n\n**For the latest stable release of Cirq "
41 "see**\n`here <https://pypi.org/project/cirq>`__.\n\n" + long_description
42 )
43
44 # Read in requirements
45 requirements = open('requirements.txt').readlines()
46 requirements = [r.strip() for r in requirements]
47 contrib_requirements = open('cirq/contrib/contrib-requirements.txt').readlines()
48 contrib_requirements = [r.strip() for r in contrib_requirements]
49 dev_requirements = open('dev_tools/conf/pip-list-dev-tools.txt').readlines()
50 dev_requirements = [r.strip() for r in dev_requirements]
51
52 cirq_packages = ['cirq'] + ['cirq.' + package for package in find_packages(where='cirq')]
53
54 # Sanity check
55 assert __version__, 'Version string cannot be empty'
56
57 setup(
58 name=name,
59 version=__version__,
60 url='http://github.com/quantumlib/cirq',
61 author='The Cirq Developers',
62 author_email='[email protected]',
63 python_requires=('>=3.6.0'),
64 install_requires=requirements,
65 extras_require={
66 'contrib': contrib_requirements,
67 'dev_env': dev_requirements + contrib_requirements,
68 },
69 license='Apache 2',
70 description=description,
71 long_description=long_description,
72 packages=cirq_packages,
73 package_data={
74 'cirq': ['py.typed'],
75 'cirq.google.api.v1': ['*.proto', '*.pyi'],
76 'cirq.google.api.v2': ['*.proto', '*.pyi'],
77 'cirq.protocols.json_test_data': ['*'],
78 },
79 )
80
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,11 +30,13 @@
# README file as long_description.
long_description = io.open('README.rst', encoding='utf-8').read()
-# If CIRQ_UNSTABLE_VERSION is set then we use cirq-unstable as the name of the package
-# and update the version to this value.
-if 'CIRQ_UNSTABLE_VERSION' in os.environ:
- name = 'cirq-unstable'
- __version__ = os.environ['CIRQ_UNSTABLE_VERSION']
+# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.
+# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence
+# it will be a pre-release version on PyPi. See
+# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning
+# for more details.
+if 'CIRQ_PRE_RELEASE_VERSION' in os.environ:
+ __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']
long_description = (
"**This is a development version of Cirq and may be "
"unstable.**\n\n**For the latest stable release of Cirq "
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,11 +30,13 @@\n # README file as long_description.\n long_description = io.open('README.rst', encoding='utf-8').read()\n \n-# If CIRQ_UNSTABLE_VERSION is set then we use cirq-unstable as the name of the package\n-# and update the version to this value.\n-if 'CIRQ_UNSTABLE_VERSION' in os.environ:\n- name = 'cirq-unstable'\n- __version__ = os.environ['CIRQ_UNSTABLE_VERSION']\n+# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.\n+# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence\n+# it will be a pre-release version on PyPi. See\n+# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning\n+# for more details.\n+if 'CIRQ_PRE_RELEASE_VERSION' in os.environ:\n+ __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n", "issue": "Prevent installing both cirq and cirq-unstable.\n**Is your feature request related to a use case or problem? Please describe.**\r\n\r\nIf `cirq-unstable` is installed in a virtualenv and you `pip install cirq`, the virtualenv gets into a bad state. This is because they are different packages with separate metadata, but they put the python code in the same location in a `cirq` folder in site-packages. So pip thinks both are installed but in fact only the most-recently installed package is \"active\".\r\n\r\nIf you try to fix such a situation by uninstalling one package, the code gets removed but pip still thinks the other package is installed. The other package is now in a broken state (can't be imported, etc.). The only way to recover is to uninstall _both_ packages and then install one of them again.\r\n\r\n**Describe the solution you'd like**\r\n\r\nI'd like `pip install cirq` to fail if `cirq-unstable` is installed, and vice-versa. I did a quick glance at setuptools docs to see if there is some way to specify packages that _must not_ be present, but I couldn't see anything. It also doesn't seem to work to add a package requirement like `cirq != *`. I'm not sure if there's any other mechanism we can use to accomplish this.\r\n\r\n**What is the urgency from your perspective for this issue? 
Is it blocking important work?**\r\n\r\nP3 - I'm not really blocked by it, it is an idea I'd like to discuss / suggestion based on principle \r\n\r\nWe have internal workarounds for this to try to keep people's virtualenvs in a good state if they accidentally install both versions, but it'd be great if we could prevent the problem from happening at all.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nfrom setuptools import find_packages, setup\n\n# This reads the __version__ variable from cirq/_version.py\n__version__ = ''\nexec(open('cirq/_version.py').read())\n\nname = 'cirq'\n\ndescription = (\n 'A framework for creating, editing, and invoking '\n 'Noisy Intermediate Scale Quantum (NISQ) circuits.'\n)\n\n# README file as long_description.\nlong_description = io.open('README.rst', encoding='utf-8').read()\n\n# If CIRQ_UNSTABLE_VERSION is set then we use cirq-unstable as the name of the package\n# and update the version to this value.\nif 'CIRQ_UNSTABLE_VERSION' in os.environ:\n name = 'cirq-unstable'\n __version__ = os.environ['CIRQ_UNSTABLE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n \"see**\\n`here <https://pypi.org/project/cirq>`__.\\n\\n\" + long_description\n )\n\n# Read in requirements\nrequirements = open('requirements.txt').readlines()\nrequirements = [r.strip() for r in requirements]\ncontrib_requirements = open('cirq/contrib/contrib-requirements.txt').readlines()\ncontrib_requirements = [r.strip() for r in contrib_requirements]\ndev_requirements = open('dev_tools/conf/pip-list-dev-tools.txt').readlines()\ndev_requirements = [r.strip() for r in dev_requirements]\n\ncirq_packages = ['cirq'] + ['cirq.' + package for package in find_packages(where='cirq')]\n\n# Sanity check\nassert __version__, 'Version string cannot be empty'\n\nsetup(\n name=name,\n version=__version__,\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n python_requires=('>=3.6.0'),\n install_requires=requirements,\n extras_require={\n 'contrib': contrib_requirements,\n 'dev_env': dev_requirements + contrib_requirements,\n },\n license='Apache 2',\n description=description,\n long_description=long_description,\n packages=cirq_packages,\n package_data={\n 'cirq': ['py.typed'],\n 'cirq.google.api.v1': ['*.proto', '*.pyi'],\n 'cirq.google.api.v2': ['*.proto', '*.pyi'],\n 'cirq.protocols.json_test_data': ['*'],\n },\n)\n", "path": "setup.py"}]} | 1,741 | 288 |
gh_patches_debug_9804 | rasdani/github-patches | git_diff | bokeh__bokeh-9604 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEATURE] BokehJS embed_item should return a reference to the plot.
Bokeh.embed.embed_item currently returns nothing. I propose it should return a reference to the plot object so I don't have to go fishing (Bokeh.index[plotData.root_id]) in Bokeh.index to do things like call resize_layout() manually.
Honestly any sort of 'official' method for doing this would be super useful.
</issue>
<code>
[start of examples/embed/json_item.py]
1 import json
2
3 from flask import Flask
4 from jinja2 import Template
5
6 from bokeh.embed import json_item
7 from bokeh.plotting import figure
8 from bokeh.resources import CDN
9 from bokeh.sampledata.iris import flowers
10
11 app = Flask(__name__)
12
13 page = Template("""
14 <!DOCTYPE html>
15 <html lang="en">
16 <head>
17 {{ resources }}
18 </head>
19
20 <body>
21 <div id="myplot"></div>
22 <div id="myplot2"></div>
23 <script>
24 fetch('/plot')
25 .then(function(response) { return response.json(); })
26 .then(function(item) { Bokeh.embed.embed_item(item); })
27 </script>
28 <script>
29 fetch('/plot2')
30 .then(function(response) { return response.json(); })
31 .then(function(item) { Bokeh.embed.embed_item(item, "myplot2"); })
32 </script>
33 </body>
34 """)
35
36 colormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'}
37 colors = [colormap[x] for x in flowers['species']]
38
39 def make_plot(x, y):
40 p = figure(title = "Iris Morphology", sizing_mode="fixed", plot_width=400, plot_height=400)
41 p.xaxis.axis_label = x
42 p.yaxis.axis_label = y
43 p.circle(flowers[x], flowers[y], color=colors, fill_alpha=0.2, size=10)
44 return p
45
46 @app.route('/')
47 def root():
48 return page.render(resources=CDN.render())
49
50 @app.route('/plot')
51 def plot():
52 p = make_plot('petal_width', 'petal_length')
53 return json.dumps(json_item(p, "myplot"))
54
55 @app.route('/plot2')
56 def plot2():
57 p = make_plot('sepal_width', 'sepal_length')
58 return json.dumps(json_item(p))
59
60 if __name__ == '__main__':
61 app.run()
62
[end of examples/embed/json_item.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/embed/json_item.py b/examples/embed/json_item.py
--- a/examples/embed/json_item.py
+++ b/examples/embed/json_item.py
@@ -23,12 +23,12 @@
<script>
fetch('/plot')
.then(function(response) { return response.json(); })
- .then(function(item) { Bokeh.embed.embed_item(item); })
+ .then(function(item) { return Bokeh.embed.embed_item(item); })
</script>
<script>
fetch('/plot2')
.then(function(response) { return response.json(); })
- .then(function(item) { Bokeh.embed.embed_item(item, "myplot2"); })
+ .then(function(item) { return Bokeh.embed.embed_item(item, "myplot2"); })
</script>
</body>
""")
| {"golden_diff": "diff --git a/examples/embed/json_item.py b/examples/embed/json_item.py\n--- a/examples/embed/json_item.py\n+++ b/examples/embed/json_item.py\n@@ -23,12 +23,12 @@\n <script>\n fetch('/plot')\n .then(function(response) { return response.json(); })\n- .then(function(item) { Bokeh.embed.embed_item(item); })\n+ .then(function(item) { return Bokeh.embed.embed_item(item); })\n </script>\n <script>\n fetch('/plot2')\n .then(function(response) { return response.json(); })\n- .then(function(item) { Bokeh.embed.embed_item(item, \"myplot2\"); })\n+ .then(function(item) { return Bokeh.embed.embed_item(item, \"myplot2\"); })\n </script>\n </body>\n \"\"\")\n", "issue": "[FEATURE] BokehJS embed_item should return a reference to the plot. \nBokeh.embed.embed_item currently returns nothing. I propose it should return a reference to the plot object so I don't have to go fishing (Bokeh.index[plotData.root_id]) in Bokeh.index to do things like call resize_layout() manually.\r\n\r\nHonestly any sort of 'official' method for doing this would be super useful.\r\n\n", "before_files": [{"content": "import json\n\nfrom flask import Flask\nfrom jinja2 import Template\n\nfrom bokeh.embed import json_item\nfrom bokeh.plotting import figure\nfrom bokeh.resources import CDN\nfrom bokeh.sampledata.iris import flowers\n\napp = Flask(__name__)\n\npage = Template(\"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n {{ resources }}\n</head>\n\n<body>\n <div id=\"myplot\"></div>\n <div id=\"myplot2\"></div>\n <script>\n fetch('/plot')\n .then(function(response) { return response.json(); })\n .then(function(item) { Bokeh.embed.embed_item(item); })\n </script>\n <script>\n fetch('/plot2')\n .then(function(response) { return response.json(); })\n .then(function(item) { Bokeh.embed.embed_item(item, \"myplot2\"); })\n </script>\n</body>\n\"\"\")\n\ncolormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'}\ncolors = [colormap[x] for x in flowers['species']]\n\ndef make_plot(x, y):\n p = figure(title = \"Iris Morphology\", sizing_mode=\"fixed\", plot_width=400, plot_height=400)\n p.xaxis.axis_label = x\n p.yaxis.axis_label = y\n p.circle(flowers[x], flowers[y], color=colors, fill_alpha=0.2, size=10)\n return p\n\[email protected]('/')\ndef root():\n return page.render(resources=CDN.render())\n\[email protected]('/plot')\ndef plot():\n p = make_plot('petal_width', 'petal_length')\n return json.dumps(json_item(p, \"myplot\"))\n\[email protected]('/plot2')\ndef plot2():\n p = make_plot('sepal_width', 'sepal_length')\n return json.dumps(json_item(p))\n\nif __name__ == '__main__':\n app.run()\n", "path": "examples/embed/json_item.py"}]} | 1,163 | 178 |
gh_patches_debug_60846 | rasdani/github-patches | git_diff | uclapi__uclapi-883 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AMP Doesn't Return STUDENT_TYPE
I am an idiot.
https://github.com/uclapi/uclapi/blob/69f0d3240d8ec6cf2a3b018897dc247aa58cb1bf/backend/uclapi/timetable/amp.py#L31
</issue>
<code>
[start of backend/uclapi/timetable/amp.py]
1 """
2 UCL Academic Modelling Project
3 Fast Code Processing
4 """
5
6 STUDENT_TYPES = {
7 'A': "Campus-based, numeric mark scheme",
8 'B': "Campus-based, non-numeric mark scheme",
9 'C': "Distance learner, numeric mark scheme",
10 'D': "Distance learner, non-numeric mark scheme",
11 'E': "MBBS Resit"
12 }
13
14
15 class InvalidAMPCodeException(Exception):
16 pass
17
18
19 class ModuleDelivery:
20 def __init__(self, delivery_code):
21 # Sanity check the code we have
22 if len(delivery_code) != 3:
23 raise InvalidAMPCodeException("Delivery code is too long")
24 if delivery_code[0] in STUDENT_TYPES:
25 self.student_type = STUDENT_TYPES[delivery_code[0]]
26 else:
27 raise InvalidAMPCodeException("Student type is not valid")
28 self.fheq_level = int(delivery_code[1])
29 self.undergraduate = delivery_code[2] == 'U'
30
31 def get_delivery(self):
32 return {
33 "fheq_level": self.fheq_level,
34 "is_undergraduate": self.undergraduate
35 }
36
37
38 class ModulePeriods:
39 # Default Attributes
40 term_1 = False
41 term_2 = False
42 term_3 = False
43 term_4 = False # Term 1 of the next academic year
44 summer = False # Summer Teaching Period
45 summer_school = False # UCL Summer School
46 summer_school_1 = False # UCL Summer School Session 1
47 summer_school_2 = False # UCL Summer School Session 2
48 lsr = False # Late Summer Resit period
49 year = False # Whole year module
50
51 def __init__(self, periods_code):
52 if periods_code == 'YEAR':
53 self.term_1 = True
54 self.term_2 = True
55 self.term_3 = True
56 self.year = True
57 elif periods_code == 'SUMMER':
58 self.summer = True
59 elif periods_code == 'LSR':
60 self.lsr = True
61 elif periods_code[0] == 'S':
62 # Summer School periods start with an S.
63 # S1, S2, S1+2
64 self.summer_school = True
65 if periods_code == 'S1':
66 self.summer_school_1 = True
67 elif periods_code == 'S2':
68 self.summer_school_2 = True
69 elif periods_code == 'S1+2':
70 self.summer_school_1 = True
71 self.summer_school_2 = True
72 else:
73 raise InvalidAMPCodeException(
74 "An invalid AMP code was found: " + periods_code
75 )
76 elif periods_code[0] == 'T':
77 # Normal classes start with a T for Term
78 if periods_code == 'T1':
79 self.term_1 = True
80 elif periods_code == 'T1/2':
81 self.term_1 = True
82 self.term_2 = True
83 elif periods_code == 'T1/2/3':
84 self.term_1 = True
85 self.term_2 = True
86 self.term_3 = True
87 elif periods_code == 'T1/3':
88 self.term_1 = True
89 self.term_3 = True
90 elif periods_code == 'T2':
91 self.term_2 = True
92 elif periods_code == 'T2/3':
93 self.term_2 = True
94 self.term_3 = True
95 elif periods_code == 'T2/3/S' or periods_code == 'T2/3/4':
96 self.term_2 = True
97 self.term_3 = True
98 self.summer = True
99 elif periods_code == 'T3':
100 self.term_3 = True
101 elif periods_code == 'T3/1':
102 self.term_3 = True
103 self.term_4 = True
104 elif periods_code == 'T3/S' or periods_code == 'T3/4':
105 self.term_3 = True
106 self.summer = True
107 elif periods_code == 'T4':
108 self.term_4 = True
109 else:
110 raise InvalidAMPCodeException(
111 "AMP Periods Code contained an invalid term element"
112 )
113 else:
114 raise InvalidAMPCodeException(
115 "An invalid AMP code was found: " + periods_code
116 )
117
118 def get_periods(self):
119 return {
120 "teaching_periods": {
121 "term_1": self.term_1,
122 "term_2": self.term_2,
123 "term_3": self.term_3,
124 "term_1_next_year": self.term_4,
125 "summer": self.summer
126 },
127 "year_long": self.year,
128 "lsr": self.lsr,
129 "summer_school": {
130 "is_summer_school": self.summer_school,
131 "sessions": {
132 "session_1": self.summer_school_1,
133 "session_2": self.summer_school_2
134 }
135 }
136 }
137
138
139 class ModuleInstance:
140 def __init__(self, amp_code):
141 """
142 An AMP Code is stored as the INSTID in CMIS.
143 It looks something like this: A6U-T1/2
144 """
145 parts = amp_code.split('-')
146 module_delivery_code = parts[0] # A6U
147 periods_code = parts[1] # T1/2
148
149 self.delivery = ModuleDelivery(module_delivery_code)
150 self.periods = ModulePeriods(periods_code)
151
[end of backend/uclapi/timetable/amp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/uclapi/timetable/amp.py b/backend/uclapi/timetable/amp.py
--- a/backend/uclapi/timetable/amp.py
+++ b/backend/uclapi/timetable/amp.py
@@ -31,7 +31,8 @@
def get_delivery(self):
return {
"fheq_level": self.fheq_level,
- "is_undergraduate": self.undergraduate
+ "is_undergraduate": self.undergraduate,
+ "student_type": self.student_type
}
| {"golden_diff": "diff --git a/backend/uclapi/timetable/amp.py b/backend/uclapi/timetable/amp.py\n--- a/backend/uclapi/timetable/amp.py\n+++ b/backend/uclapi/timetable/amp.py\n@@ -31,7 +31,8 @@\n def get_delivery(self):\r\n return {\r\n \"fheq_level\": self.fheq_level,\r\n- \"is_undergraduate\": self.undergraduate\r\n+ \"is_undergraduate\": self.undergraduate,\r\n+ \"student_type\": self.student_type\r\n }\n", "issue": "AMP Doesn't Return STUDENT_TYPE\nI am an idiot.\r\n\r\nhttps://github.com/uclapi/uclapi/blob/69f0d3240d8ec6cf2a3b018897dc247aa58cb1bf/backend/uclapi/timetable/amp.py#L31\n", "before_files": [{"content": "\"\"\"\r\nUCL Academic Modelling Project\r\nFast Code Processing\r\n\"\"\"\r\n\r\nSTUDENT_TYPES = {\r\n 'A': \"Campus-based, numeric mark scheme\",\r\n 'B': \"Campus-based, non-numeric mark scheme\",\r\n 'C': \"Distance learner, numeric mark scheme\",\r\n 'D': \"Distance learner, non-numeric mark scheme\",\r\n 'E': \"MBBS Resit\"\r\n}\r\n\r\n\r\nclass InvalidAMPCodeException(Exception):\r\n pass\r\n\r\n\r\nclass ModuleDelivery:\r\n def __init__(self, delivery_code):\r\n # Sanity check the code we have\r\n if len(delivery_code) != 3:\r\n raise InvalidAMPCodeException(\"Delivery code is too long\")\r\n if delivery_code[0] in STUDENT_TYPES:\r\n self.student_type = STUDENT_TYPES[delivery_code[0]]\r\n else:\r\n raise InvalidAMPCodeException(\"Student type is not valid\")\r\n self.fheq_level = int(delivery_code[1])\r\n self.undergraduate = delivery_code[2] == 'U'\r\n\r\n def get_delivery(self):\r\n return {\r\n \"fheq_level\": self.fheq_level,\r\n \"is_undergraduate\": self.undergraduate\r\n }\r\n\r\n\r\nclass ModulePeriods:\r\n # Default Attributes\r\n term_1 = False\r\n term_2 = False\r\n term_3 = False\r\n term_4 = False # Term 1 of the next academic year\r\n summer = False # Summer Teaching Period\r\n summer_school = False # UCL Summer School\r\n summer_school_1 = False # UCL Summer School Session 1\r\n summer_school_2 = False # UCL Summer School Session 2\r\n lsr = False # Late Summer Resit period\r\n year = False # Whole year module\r\n\r\n def __init__(self, periods_code):\r\n if periods_code == 'YEAR':\r\n self.term_1 = True\r\n self.term_2 = True\r\n self.term_3 = True\r\n self.year = True\r\n elif periods_code == 'SUMMER':\r\n self.summer = True\r\n elif periods_code == 'LSR':\r\n self.lsr = True\r\n elif periods_code[0] == 'S':\r\n # Summer School periods start with an S.\r\n # S1, S2, S1+2\r\n self.summer_school = True\r\n if periods_code == 'S1':\r\n self.summer_school_1 = True\r\n elif periods_code == 'S2':\r\n self.summer_school_2 = True\r\n elif periods_code == 'S1+2':\r\n self.summer_school_1 = True\r\n self.summer_school_2 = True\r\n else:\r\n raise InvalidAMPCodeException(\r\n \"An invalid AMP code was found: \" + periods_code\r\n )\r\n elif periods_code[0] == 'T':\r\n # Normal classes start with a T for Term\r\n if periods_code == 'T1':\r\n self.term_1 = True\r\n elif periods_code == 'T1/2':\r\n self.term_1 = True\r\n self.term_2 = True\r\n elif periods_code == 'T1/2/3':\r\n self.term_1 = True\r\n self.term_2 = True\r\n self.term_3 = True\r\n elif periods_code == 'T1/3':\r\n self.term_1 = True\r\n self.term_3 = True\r\n elif periods_code == 'T2':\r\n self.term_2 = True\r\n elif periods_code == 'T2/3':\r\n self.term_2 = True\r\n self.term_3 = True\r\n elif periods_code == 'T2/3/S' or periods_code == 'T2/3/4':\r\n self.term_2 = True\r\n self.term_3 = True\r\n self.summer = True\r\n elif periods_code == 'T3':\r\n self.term_3 = True\r\n elif periods_code == 
'T3/1':\r\n self.term_3 = True\r\n self.term_4 = True\r\n elif periods_code == 'T3/S' or periods_code == 'T3/4':\r\n self.term_3 = True\r\n self.summer = True\r\n elif periods_code == 'T4':\r\n self.term_4 = True\r\n else:\r\n raise InvalidAMPCodeException(\r\n \"AMP Periods Code contained an invalid term element\"\r\n )\r\n else:\r\n raise InvalidAMPCodeException(\r\n \"An invalid AMP code was found: \" + periods_code\r\n )\r\n\r\n def get_periods(self):\r\n return {\r\n \"teaching_periods\": {\r\n \"term_1\": self.term_1,\r\n \"term_2\": self.term_2,\r\n \"term_3\": self.term_3,\r\n \"term_1_next_year\": self.term_4,\r\n \"summer\": self.summer\r\n },\r\n \"year_long\": self.year,\r\n \"lsr\": self.lsr,\r\n \"summer_school\": {\r\n \"is_summer_school\": self.summer_school,\r\n \"sessions\": {\r\n \"session_1\": self.summer_school_1,\r\n \"session_2\": self.summer_school_2\r\n }\r\n }\r\n }\r\n\r\n\r\nclass ModuleInstance:\r\n def __init__(self, amp_code):\r\n \"\"\"\r\n An AMP Code is stored as the INSTID in CMIS.\r\n It looks something like this: A6U-T1/2\r\n \"\"\"\r\n parts = amp_code.split('-')\r\n module_delivery_code = parts[0] # A6U\r\n periods_code = parts[1] # T1/2\r\n\r\n self.delivery = ModuleDelivery(module_delivery_code)\r\n self.periods = ModulePeriods(periods_code)\r\n", "path": "backend/uclapi/timetable/amp.py"}]} | 2,185 | 122 |
gh_patches_debug_16323 | rasdani/github-patches | git_diff | litestar-org__litestar-2602 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docs: update TODO app w/ sqlalchemy tutorial to use autocommitting before send handler.
> @AgarwalPragy You are correct. By default, the plugin session handler does not automatically commit on a successful response. You can easily change it by using the following `before_send` handler:
>
> ```python
> from advanced_alchemy.extensions.litestar.plugins.init.config.asyncio import autocommit_before_send_handler
>
> db_url = "sqlite+aiosqlite:///:memory:"
> app = Litestar(
> route_handlers=[hello],
> plugins=[
> SQLAlchemyPlugin(
> config=SQLAlchemyAsyncConfig(
> connection_string=db_url,
> session_dependency_key="transaction",
> create_all=True,
> alembic_config=AlembicAsyncConfig(target_metadata=orm_registry.metadata),
> before_send_handler=autocommit_before_send_handler,
> ),
> ),
> ],
> )
> ```
I'd say this is a documentation bug on our side now.
We should update https://docs.litestar.dev/latest/tutorials/sqlalchemy/3-init-plugin.html to do the same as this (it was written before the autocommit handler was a part of the plugin, IIRC).
_Originally posted by @peterschutt in https://github.com/litestar-org/litestar/issues/2556#issuecomment-1786287414_
</issue>
<code>
[start of docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py]
1 from typing import AsyncGenerator, List, Optional
2
3 from sqlalchemy import select
4 from sqlalchemy.exc import IntegrityError, NoResultFound
5 from sqlalchemy.ext.asyncio import AsyncSession
6 from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
7
8 from litestar import Litestar, get, post, put
9 from litestar.contrib.sqlalchemy.plugins import SQLAlchemyAsyncConfig, SQLAlchemyPlugin
10 from litestar.exceptions import ClientException, NotFoundException
11 from litestar.status_codes import HTTP_409_CONFLICT
12
13
14 class Base(DeclarativeBase):
15 ...
16
17
18 class TodoItem(Base):
19 __tablename__ = "todo_items"
20
21 title: Mapped[str] = mapped_column(primary_key=True)
22 done: Mapped[bool]
23
24
25 async def provide_transaction(db_session: AsyncSession) -> AsyncGenerator[AsyncSession, None]:
26 try:
27 async with db_session.begin():
28 yield db_session
29 except IntegrityError as exc:
30 raise ClientException(
31 status_code=HTTP_409_CONFLICT,
32 detail=str(exc),
33 ) from exc
34
35
36 async def get_todo_by_title(todo_name, session: AsyncSession) -> TodoItem:
37 query = select(TodoItem).where(TodoItem.title == todo_name)
38 result = await session.execute(query)
39 try:
40 return result.scalar_one()
41 except NoResultFound as e:
42 raise NotFoundException(detail=f"TODO {todo_name!r} not found") from e
43
44
45 async def get_todo_list(done: Optional[bool], session: AsyncSession) -> List[TodoItem]:
46 query = select(TodoItem)
47 if done is not None:
48 query = query.where(TodoItem.done.is_(done))
49
50 result = await session.execute(query)
51 return result.scalars().all()
52
53
54 @get("/")
55 async def get_list(transaction: AsyncSession, done: Optional[bool] = None) -> List[TodoItem]:
56 return await get_todo_list(done, transaction)
57
58
59 @post("/")
60 async def add_item(data: TodoItem, transaction: AsyncSession) -> TodoItem:
61 transaction.add(data)
62 return data
63
64
65 @put("/{item_title:str}")
66 async def update_item(item_title: str, data: TodoItem, transaction: AsyncSession) -> TodoItem:
67 todo_item = await get_todo_by_title(item_title, transaction)
68 todo_item.title = data.title
69 todo_item.done = data.done
70 return todo_item
71
72
73 db_config = SQLAlchemyAsyncConfig(
74 connection_string="sqlite+aiosqlite:///todo.sqlite", metadata=Base.metadata, create_all=True
75 )
76
77 app = Litestar(
78 [get_list, add_item, update_item],
79 dependencies={"transaction": provide_transaction},
80 plugins=[SQLAlchemyPlugin(db_config)],
81 )
82
[end of docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py b/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py
--- a/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py
+++ b/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py
@@ -1,5 +1,6 @@
from typing import AsyncGenerator, List, Optional
+from advanced_alchemy.extensions.litestar.plugins.init.config.asyncio import autocommit_before_send_handler
from sqlalchemy import select
from sqlalchemy.exc import IntegrityError, NoResultFound
from sqlalchemy.ext.asyncio import AsyncSession
@@ -71,7 +72,10 @@
db_config = SQLAlchemyAsyncConfig(
- connection_string="sqlite+aiosqlite:///todo.sqlite", metadata=Base.metadata, create_all=True
+ connection_string="sqlite+aiosqlite:///todo.sqlite",
+ metadata=Base.metadata,
+ create_all=True,
+ before_send_handler=autocommit_before_send_handler,
)
app = Litestar(
| {"golden_diff": "diff --git a/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py b/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py\n--- a/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py\n+++ b/docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py\n@@ -1,5 +1,6 @@\n from typing import AsyncGenerator, List, Optional\n \n+from advanced_alchemy.extensions.litestar.plugins.init.config.asyncio import autocommit_before_send_handler\n from sqlalchemy import select\n from sqlalchemy.exc import IntegrityError, NoResultFound\n from sqlalchemy.ext.asyncio import AsyncSession\n@@ -71,7 +72,10 @@\n \n \n db_config = SQLAlchemyAsyncConfig(\n- connection_string=\"sqlite+aiosqlite:///todo.sqlite\", metadata=Base.metadata, create_all=True\n+ connection_string=\"sqlite+aiosqlite:///todo.sqlite\",\n+ metadata=Base.metadata,\n+ create_all=True,\n+ before_send_handler=autocommit_before_send_handler,\n )\n \n app = Litestar(\n", "issue": "docs: update TODO app w/ sqlalchemy tutorial to use autocommitting before send handler.\n > @AgarwalPragy You are correct. By default, the plugin session handler does not automatically commit on a successful response. You can easily change it by using the following `before_send` handler:\r\n> \r\n> ```python\r\n> from advanced_alchemy.extensions.litestar.plugins.init.config.asyncio import autocommit_before_send_handler\r\n> \r\n> db_url = \"sqlite+aiosqlite:///:memory:\"\r\n> app = Litestar(\r\n> route_handlers=[hello],\r\n> plugins=[\r\n> SQLAlchemyPlugin(\r\n> config=SQLAlchemyAsyncConfig(\r\n> connection_string=db_url,\r\n> session_dependency_key=\"transaction\",\r\n> create_all=True,\r\n> alembic_config=AlembicAsyncConfig(target_metadata=orm_registry.metadata),\r\n> before_send_handler=autocommit_before_send_handler,\r\n> ),\r\n> ),\r\n> ],\r\n> )\r\n> ```\r\n\r\nI'd say this is a documentation bug on our side now.\r\n\r\nWe should update https://docs.litestar.dev/latest/tutorials/sqlalchemy/3-init-plugin.html to do the same as this (it was written before the autocommit handler was a part of the plugin, IIRC).\r\n\r\n_Originally posted by @peterschutt in https://github.com/litestar-org/litestar/issues/2556#issuecomment-1786287414_\r\n \n\n<!-- POLAR PLEDGE BADGE START -->\n---\n> [!NOTE] \n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\n>\n> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)\n> * If you would like to see an issue prioritized, make a pledge towards it!\n> * We receive the pledge once the issue is completed & verified\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\n\n<a href=\"https://polar.sh/litestar-org/litestar/issues/2569\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2569/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2569/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "from typing import AsyncGenerator, List, Optional\n\nfrom sqlalchemy import select\nfrom sqlalchemy.exc import IntegrityError, NoResultFound\nfrom 
sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column\n\nfrom litestar import Litestar, get, post, put\nfrom litestar.contrib.sqlalchemy.plugins import SQLAlchemyAsyncConfig, SQLAlchemyPlugin\nfrom litestar.exceptions import ClientException, NotFoundException\nfrom litestar.status_codes import HTTP_409_CONFLICT\n\n\nclass Base(DeclarativeBase):\n ...\n\n\nclass TodoItem(Base):\n __tablename__ = \"todo_items\"\n\n title: Mapped[str] = mapped_column(primary_key=True)\n done: Mapped[bool]\n\n\nasync def provide_transaction(db_session: AsyncSession) -> AsyncGenerator[AsyncSession, None]:\n try:\n async with db_session.begin():\n yield db_session\n except IntegrityError as exc:\n raise ClientException(\n status_code=HTTP_409_CONFLICT,\n detail=str(exc),\n ) from exc\n\n\nasync def get_todo_by_title(todo_name, session: AsyncSession) -> TodoItem:\n query = select(TodoItem).where(TodoItem.title == todo_name)\n result = await session.execute(query)\n try:\n return result.scalar_one()\n except NoResultFound as e:\n raise NotFoundException(detail=f\"TODO {todo_name!r} not found\") from e\n\n\nasync def get_todo_list(done: Optional[bool], session: AsyncSession) -> List[TodoItem]:\n query = select(TodoItem)\n if done is not None:\n query = query.where(TodoItem.done.is_(done))\n\n result = await session.execute(query)\n return result.scalars().all()\n\n\n@get(\"/\")\nasync def get_list(transaction: AsyncSession, done: Optional[bool] = None) -> List[TodoItem]:\n return await get_todo_list(done, transaction)\n\n\n@post(\"/\")\nasync def add_item(data: TodoItem, transaction: AsyncSession) -> TodoItem:\n transaction.add(data)\n return data\n\n\n@put(\"/{item_title:str}\")\nasync def update_item(item_title: str, data: TodoItem, transaction: AsyncSession) -> TodoItem:\n todo_item = await get_todo_by_title(item_title, transaction)\n todo_item.title = data.title\n todo_item.done = data.done\n return todo_item\n\n\ndb_config = SQLAlchemyAsyncConfig(\n connection_string=\"sqlite+aiosqlite:///todo.sqlite\", metadata=Base.metadata, create_all=True\n)\n\napp = Litestar(\n [get_list, add_item, update_item],\n dependencies={\"transaction\": provide_transaction},\n plugins=[SQLAlchemyPlugin(db_config)],\n)\n", "path": "docs/examples/contrib/sqlalchemy/plugins/tutorial/full_app_with_plugin.py"}]} | 1,887 | 224 |
gh_patches_debug_33184 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2020 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Slow performance in async execution because of DirectivesExtension
The use of DirectivesExtension in async GraphQL execution wraps every field of the result graph in an async function, which slows down performance.
We have one resolver returning a huge list of objects where every field of the model is already filled with a native type (str, int, ...), so there is no need to resolve these model fields asynchronously.
But the DirectivesExtension calls `await_maybe` for each field of each model in the result list, so there are thousands of async function calls that must be awaited.
We don't use custom directives, so I replaced the Schema.execute function (from strawberry.schema.schema.py) with a version that only registers the DirectivesExtension if custom directives are present. This solves the problem for us, as long as we don't use custom directives, but I think it is a modification that could also be merged into the strawberry source code.
Here is a demo script using the patch, showing the dramatic performance improvement for this case.
```
import asyncio
import time
import typing
from typing import Any, Dict, Optional
import strawberry
from strawberry import Schema
from strawberry.extensions.directives import DirectivesExtension
from strawberry.schema.execute import execute
from strawberry.types import ExecutionContext, ExecutionResult
@strawberry.type
class Book:
author: str
title: str
async def get_books():
res = []
await asyncio.sleep(0.1)
for i in range(200000):
res.append(
Book(
author="Unknown",
title=f"Book nr {i}",
)
)
return res
@strawberry.type
class DemoQuery:
books: typing.List[Book] = strawberry.field(resolver=get_books)
async def test():
start_time = time.time()
schema = strawberry.Schema(query=DemoQuery)
query = "{ books { title author } }"
result = await schema.execute(
query,
)
if result.errors:
print(result.errors)
print("complete", time.time() - start_time)
async def schema_execute(
self,
query: str,
variable_values: Optional[Dict[str, Any]] = None,
context_value: Optional[Any] = None,
root_value: Optional[Any] = None,
operation_name: Optional[str] = None,
) -> ExecutionResult:
# Create execution context
execution_context = ExecutionContext(
query=query,
schema=self,
context=context_value,
root_value=root_value,
variables=variable_values,
provided_operation_name=operation_name,
)
# BEGIN - CHANGED
extensions = list(self.extensions)
if len(self.directives) > 0:
extensions += [DirectivesExtension]
result = await execute(
self._schema, # pylint: disable=protected-access
query,
extensions=extensions,
execution_context_class=self.execution_context_class,
execution_context=execution_context,
)
# END - CHANGED
if result.errors:
self.process_errors(result.errors, execution_context=execution_context)
return result
def apply_directives_patch():
Schema.execute = schema_execute
def run():
asyncio.run(test())
def run_with_patch():
apply_directives_patch()
asyncio.run(test())
if __name__ == "__main__":
print("run")
run()
print("run with patch")
run_with_patch()
```
On my Machine I get the following output:
```
run
complete 24.10178303718567
run with patch
complete 4.264432191848755
```
</issue>
<code>
[start of strawberry/schema/schema.py]
1 from functools import lru_cache
2 from typing import Any, Dict, Iterable, Optional, Sequence, Type, Union
3
4 from graphql import (
5 ExecutionContext as GraphQLExecutionContext,
6 GraphQLNamedType,
7 GraphQLNonNull,
8 GraphQLSchema,
9 get_introspection_query,
10 parse,
11 validate_schema,
12 )
13 from graphql.subscription import subscribe
14 from graphql.type.directives import specified_directives
15
16 from strawberry.custom_scalar import ScalarDefinition, ScalarWrapper
17 from strawberry.directive import StrawberryDirective
18 from strawberry.enum import EnumDefinition
19 from strawberry.extensions import Extension
20 from strawberry.extensions.directives import (
21 DirectivesExtension,
22 DirectivesExtensionSync,
23 )
24 from strawberry.field import StrawberryField
25 from strawberry.schema.schema_converter import GraphQLCoreConverter
26 from strawberry.schema.types.scalar import DEFAULT_SCALAR_REGISTRY
27 from strawberry.types import ExecutionContext, ExecutionResult
28 from strawberry.types.graphql import OperationType
29 from strawberry.types.types import TypeDefinition
30 from strawberry.union import StrawberryUnion
31
32 from ..printer import print_schema
33 from .base import BaseSchema
34 from .config import StrawberryConfig
35 from .execute import execute, execute_sync
36
37
38 DEFAULT_ALLOWED_OPERATION_TYPES = {
39 OperationType.QUERY,
40 OperationType.MUTATION,
41 OperationType.SUBSCRIPTION,
42 }
43
44
45 class Schema(BaseSchema):
46 def __init__(
47 self,
48 # TODO: can we make sure we only allow to pass something that has been decorated?
49 query: Type,
50 mutation: Optional[Type] = None,
51 subscription: Optional[Type] = None,
52 directives: Sequence[StrawberryDirective] = (),
53 types=(),
54 extensions: Sequence[Union[Type[Extension], Extension]] = (),
55 execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,
56 config: Optional[StrawberryConfig] = None,
57 scalar_overrides: Optional[
58 Dict[object, Union[ScalarWrapper, ScalarDefinition]]
59 ] = None,
60 ):
61 self.extensions = extensions
62 self.execution_context_class = execution_context_class
63 self.config = config or StrawberryConfig()
64
65 scalar_registry: Dict[object, Union[ScalarWrapper, ScalarDefinition]] = {
66 **DEFAULT_SCALAR_REGISTRY
67 }
68 if scalar_overrides:
69 scalar_registry.update(scalar_overrides)
70
71 self.schema_converter = GraphQLCoreConverter(self.config, scalar_registry)
72 self.directives = directives
73
74 query_type = self.schema_converter.from_object(query._type_definition)
75 mutation_type = (
76 self.schema_converter.from_object(mutation._type_definition)
77 if mutation
78 else None
79 )
80 subscription_type = (
81 self.schema_converter.from_object(subscription._type_definition)
82 if subscription
83 else None
84 )
85
86 graphql_directives = tuple(
87 self.schema_converter.from_directive(directive) for directive in directives
88 )
89
90 graphql_types = []
91 for type_ in types:
92 graphql_type = self.schema_converter.from_maybe_optional(type_)
93 if isinstance(graphql_type, GraphQLNonNull):
94 graphql_type = graphql_type.of_type
95 if not isinstance(graphql_type, GraphQLNamedType):
96 raise TypeError(f"{graphql_type} is not a named GraphQL Type")
97 graphql_types.append(graphql_type)
98
99 self._schema = GraphQLSchema(
100 query=query_type,
101 mutation=mutation_type,
102 subscription=subscription_type if subscription else None,
103 directives=specified_directives + graphql_directives,
104 types=graphql_types,
105 extensions={
106 GraphQLCoreConverter.DEFINITION_BACKREF: self,
107 },
108 )
109
110 # attach our schema to the GraphQL schema instance
111 self._schema._strawberry_schema = self # type: ignore
112
113 # Validate schema early because we want developers to know about
114 # possible issues as soon as possible
115 errors = validate_schema(self._schema)
116 if errors:
117 formatted_errors = "\n\n".join(f"❌ {error.message}" for error in errors)
118 raise ValueError(f"Invalid Schema. Errors:\n\n{formatted_errors}")
119
120 self.query = self.schema_converter.type_map[query_type.name]
121
122 @lru_cache()
123 def get_type_by_name( # type: ignore # lru_cache makes mypy complain
124 self, name: str
125 ) -> Optional[
126 Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]
127 ]:
128 # TODO: respect auto_camel_case
129 if name in self.schema_converter.type_map:
130 return self.schema_converter.type_map[name].definition
131
132 return None
133
134 def get_field_for_type(
135 self, field_name: str, type_name: str
136 ) -> Optional[StrawberryField]:
137 type_ = self.get_type_by_name(type_name)
138
139 if not type_:
140 return None # pragma: no cover
141
142 assert isinstance(type_, TypeDefinition)
143
144 return next(
145 (
146 field
147 for field in type_.fields
148 if self.config.name_converter.get_graphql_name(field) == field_name
149 ),
150 None,
151 )
152
153 @lru_cache()
154 def get_directive_by_name(self, graphql_name: str) -> Optional[StrawberryDirective]:
155 return next(
156 (
157 directive
158 for directive in self.directives
159 if self.config.name_converter.from_directive(directive) == graphql_name
160 ),
161 None,
162 )
163
164 async def execute(
165 self,
166 query: str,
167 variable_values: Optional[Dict[str, Any]] = None,
168 context_value: Optional[Any] = None,
169 root_value: Optional[Any] = None,
170 operation_name: Optional[str] = None,
171 allowed_operation_types: Optional[Iterable[OperationType]] = None,
172 ) -> ExecutionResult:
173 if allowed_operation_types is None:
174 allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES
175
176 # Create execution context
177 execution_context = ExecutionContext(
178 query=query,
179 schema=self,
180 context=context_value,
181 root_value=root_value,
182 variables=variable_values,
183 provided_operation_name=operation_name,
184 )
185
186 result = await execute(
187 self._schema,
188 query,
189 extensions=list(self.extensions) + [DirectivesExtension],
190 execution_context_class=self.execution_context_class,
191 execution_context=execution_context,
192 allowed_operation_types=allowed_operation_types,
193 )
194
195 if result.errors:
196 self.process_errors(result.errors, execution_context=execution_context)
197
198 return result
199
200 def execute_sync(
201 self,
202 query: str,
203 variable_values: Optional[Dict[str, Any]] = None,
204 context_value: Optional[Any] = None,
205 root_value: Optional[Any] = None,
206 operation_name: Optional[str] = None,
207 allowed_operation_types: Optional[Iterable[OperationType]] = None,
208 ) -> ExecutionResult:
209 if allowed_operation_types is None:
210 allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES
211
212 execution_context = ExecutionContext(
213 query=query,
214 schema=self,
215 context=context_value,
216 root_value=root_value,
217 variables=variable_values,
218 provided_operation_name=operation_name,
219 )
220
221 result = execute_sync(
222 self._schema,
223 query,
224 extensions=list(self.extensions) + [DirectivesExtensionSync],
225 execution_context_class=self.execution_context_class,
226 execution_context=execution_context,
227 allowed_operation_types=allowed_operation_types,
228 )
229
230 if result.errors:
231 self.process_errors(result.errors, execution_context=execution_context)
232
233 return result
234
235 async def subscribe(
236 self,
237 query: str,
238 variable_values: Optional[Dict[str, Any]] = None,
239 context_value: Optional[Any] = None,
240 root_value: Optional[Any] = None,
241 operation_name: Optional[str] = None,
242 ):
243 return await subscribe(
244 self._schema,
245 parse(query),
246 root_value=root_value,
247 context_value=context_value,
248 variable_values=variable_values,
249 operation_name=operation_name,
250 )
251
252 def as_str(self) -> str:
253 return print_schema(self)
254
255 __str__ = as_str
256
257 def introspect(self) -> Dict[str, Any]:
258 """Return the introspection query result for the current schema
259
260 Raises:
261 ValueError: If the introspection query fails due to an invalid schema
262 """
263 introspection = self.execute_sync(get_introspection_query())
264 if introspection.errors or not introspection.data:
265 raise ValueError(f"Invalid Schema. Errors {introspection.errors!r}")
266
267 return introspection.data
268
[end of strawberry/schema/schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py
--- a/strawberry/schema/schema.py
+++ b/strawberry/schema/schema.py
@@ -1,5 +1,5 @@
from functools import lru_cache
-from typing import Any, Dict, Iterable, Optional, Sequence, Type, Union
+from typing import Any, Dict, Iterable, List, Optional, Sequence, Type, Union
from graphql import (
ExecutionContext as GraphQLExecutionContext,
@@ -119,6 +119,16 @@
self.query = self.schema_converter.type_map[query_type.name]
+ def get_extensions(
+ self, sync: bool = False
+ ) -> List[Union[Type[Extension], Extension]]:
+ extensions = list(self.extensions)
+
+ if self.directives:
+ extensions.append(DirectivesExtensionSync if sync else DirectivesExtension)
+
+ return extensions
+
@lru_cache()
def get_type_by_name( # type: ignore # lru_cache makes mypy complain
self, name: str
@@ -186,7 +196,7 @@
result = await execute(
self._schema,
query,
- extensions=list(self.extensions) + [DirectivesExtension],
+ extensions=self.get_extensions(),
execution_context_class=self.execution_context_class,
execution_context=execution_context,
allowed_operation_types=allowed_operation_types,
@@ -221,7 +231,7 @@
result = execute_sync(
self._schema,
query,
- extensions=list(self.extensions) + [DirectivesExtensionSync],
+ extensions=self.get_extensions(sync=True),
execution_context_class=self.execution_context_class,
execution_context=execution_context,
allowed_operation_types=allowed_operation_types,
| {"golden_diff": "diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py\n--- a/strawberry/schema/schema.py\n+++ b/strawberry/schema/schema.py\n@@ -1,5 +1,5 @@\n from functools import lru_cache\n-from typing import Any, Dict, Iterable, Optional, Sequence, Type, Union\n+from typing import Any, Dict, Iterable, List, Optional, Sequence, Type, Union\n \n from graphql import (\n ExecutionContext as GraphQLExecutionContext,\n@@ -119,6 +119,16 @@\n \n self.query = self.schema_converter.type_map[query_type.name]\n \n+ def get_extensions(\n+ self, sync: bool = False\n+ ) -> List[Union[Type[Extension], Extension]]:\n+ extensions = list(self.extensions)\n+\n+ if self.directives:\n+ extensions.append(DirectivesExtensionSync if sync else DirectivesExtension)\n+\n+ return extensions\n+\n @lru_cache()\n def get_type_by_name( # type: ignore # lru_cache makes mypy complain\n self, name: str\n@@ -186,7 +196,7 @@\n result = await execute(\n self._schema,\n query,\n- extensions=list(self.extensions) + [DirectivesExtension],\n+ extensions=self.get_extensions(),\n execution_context_class=self.execution_context_class,\n execution_context=execution_context,\n allowed_operation_types=allowed_operation_types,\n@@ -221,7 +231,7 @@\n result = execute_sync(\n self._schema,\n query,\n- extensions=list(self.extensions) + [DirectivesExtensionSync],\n+ extensions=self.get_extensions(sync=True),\n execution_context_class=self.execution_context_class,\n execution_context=execution_context,\n allowed_operation_types=allowed_operation_types,\n", "issue": "Slow performance in async execution because of DirectivesExtension\nThe use of DirectivesExtension in async graphql execution wraps every field of the result graph in an async function. This slows down performance. \r\nWe have one resolver returning a huge list of objects where every field of the model is already filled with a native type (str, int ..). So there is no need for an asynchronous resolve of these model fields. \r\nBut the DirectivesExtension calls `await_maybe` for each field of each model in the result list. Now there are thousands of async function calls which should be awaited. \r\nWe don't use custom directives so I replaced the Schema.execute function (from strawberry.schema.schema.py). In this function I only register the DirectivesExtension if there are custom directives. For us this solves the problem, as long as we don't use custom directives. But I think it would be a modification that could also be merged in the strawberry sourcecode. \r\n\r\nHere is a demo script with the used patch, showing the dramatic performance improvement for this case. 
\r\n\r\n``` \r\nimport asyncio\r\nimport time\r\nimport typing\r\nfrom typing import Any, Dict, Optional\r\n\r\nimport strawberry\r\nfrom strawberry import Schema\r\nfrom strawberry.extensions.directives import DirectivesExtension\r\nfrom strawberry.schema.execute import execute\r\nfrom strawberry.types import ExecutionContext, ExecutionResult\r\n\r\n\r\[email protected]\r\nclass Book:\r\n author: str\r\n title: str\r\n\r\n\r\nasync def get_books():\r\n res = []\r\n await asyncio.sleep(0.1)\r\n for i in range(200000):\r\n res.append(\r\n Book(\r\n author=\"Unknown\",\r\n title=f\"Book nr {i}\",\r\n )\r\n )\r\n return res\r\n\r\n\r\[email protected]\r\nclass DemoQuery:\r\n books: typing.List[Book] = strawberry.field(resolver=get_books)\r\n\r\n\r\nasync def test():\r\n start_time = time.time()\r\n schema = strawberry.Schema(query=DemoQuery)\r\n query = \"{ books { title author } }\"\r\n result = await schema.execute(\r\n query,\r\n )\r\n if result.errors:\r\n print(result.errors)\r\n\r\n print(\"complete\", time.time() - start_time)\r\n\r\n\r\nasync def schema_execute(\r\n self,\r\n query: str,\r\n variable_values: Optional[Dict[str, Any]] = None,\r\n context_value: Optional[Any] = None,\r\n root_value: Optional[Any] = None,\r\n operation_name: Optional[str] = None,\r\n) -> ExecutionResult:\r\n # Create execution context\r\n execution_context = ExecutionContext(\r\n query=query,\r\n schema=self,\r\n context=context_value,\r\n root_value=root_value,\r\n variables=variable_values,\r\n provided_operation_name=operation_name,\r\n )\r\n\r\n # BEGIN - CHANGED\r\n extensions = list(self.extensions)\r\n if len(self.directives) > 0:\r\n extensions += [DirectivesExtension]\r\n\r\n result = await execute(\r\n self._schema, # pylint: disable=protected-access\r\n query,\r\n extensions=extensions,\r\n execution_context_class=self.execution_context_class,\r\n execution_context=execution_context,\r\n )\r\n # END - CHANGED\r\n\r\n if result.errors:\r\n self.process_errors(result.errors, execution_context=execution_context)\r\n\r\n return result\r\n\r\n\r\ndef apply_directives_patch():\r\n Schema.execute = schema_execute\r\n\r\n\r\ndef run():\r\n asyncio.run(test())\r\n\r\n\r\ndef run_with_patch():\r\n apply_directives_patch()\r\n asyncio.run(test())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"run\")\r\n run()\r\n print(\"run with patch\")\r\n run_with_patch()\r\n\r\n``` \r\n\r\nOn my Machine I get the following output:\r\n\r\n```\r\nrun\r\ncomplete 24.10178303718567\r\nrun with patch\r\ncomplete 4.264432191848755\r\n```\r\n\n", "before_files": [{"content": "from functools import lru_cache\nfrom typing import Any, Dict, Iterable, Optional, Sequence, Type, Union\n\nfrom graphql import (\n ExecutionContext as GraphQLExecutionContext,\n GraphQLNamedType,\n GraphQLNonNull,\n GraphQLSchema,\n get_introspection_query,\n parse,\n validate_schema,\n)\nfrom graphql.subscription import subscribe\nfrom graphql.type.directives import specified_directives\n\nfrom strawberry.custom_scalar import ScalarDefinition, ScalarWrapper\nfrom strawberry.directive import StrawberryDirective\nfrom strawberry.enum import EnumDefinition\nfrom strawberry.extensions import Extension\nfrom strawberry.extensions.directives import (\n DirectivesExtension,\n DirectivesExtensionSync,\n)\nfrom strawberry.field import StrawberryField\nfrom strawberry.schema.schema_converter import GraphQLCoreConverter\nfrom strawberry.schema.types.scalar import DEFAULT_SCALAR_REGISTRY\nfrom strawberry.types import ExecutionContext, ExecutionResult\nfrom 
strawberry.types.graphql import OperationType\nfrom strawberry.types.types import TypeDefinition\nfrom strawberry.union import StrawberryUnion\n\nfrom ..printer import print_schema\nfrom .base import BaseSchema\nfrom .config import StrawberryConfig\nfrom .execute import execute, execute_sync\n\n\nDEFAULT_ALLOWED_OPERATION_TYPES = {\n OperationType.QUERY,\n OperationType.MUTATION,\n OperationType.SUBSCRIPTION,\n}\n\n\nclass Schema(BaseSchema):\n def __init__(\n self,\n # TODO: can we make sure we only allow to pass something that has been decorated?\n query: Type,\n mutation: Optional[Type] = None,\n subscription: Optional[Type] = None,\n directives: Sequence[StrawberryDirective] = (),\n types=(),\n extensions: Sequence[Union[Type[Extension], Extension]] = (),\n execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,\n config: Optional[StrawberryConfig] = None,\n scalar_overrides: Optional[\n Dict[object, Union[ScalarWrapper, ScalarDefinition]]\n ] = None,\n ):\n self.extensions = extensions\n self.execution_context_class = execution_context_class\n self.config = config or StrawberryConfig()\n\n scalar_registry: Dict[object, Union[ScalarWrapper, ScalarDefinition]] = {\n **DEFAULT_SCALAR_REGISTRY\n }\n if scalar_overrides:\n scalar_registry.update(scalar_overrides)\n\n self.schema_converter = GraphQLCoreConverter(self.config, scalar_registry)\n self.directives = directives\n\n query_type = self.schema_converter.from_object(query._type_definition)\n mutation_type = (\n self.schema_converter.from_object(mutation._type_definition)\n if mutation\n else None\n )\n subscription_type = (\n self.schema_converter.from_object(subscription._type_definition)\n if subscription\n else None\n )\n\n graphql_directives = tuple(\n self.schema_converter.from_directive(directive) for directive in directives\n )\n\n graphql_types = []\n for type_ in types:\n graphql_type = self.schema_converter.from_maybe_optional(type_)\n if isinstance(graphql_type, GraphQLNonNull):\n graphql_type = graphql_type.of_type\n if not isinstance(graphql_type, GraphQLNamedType):\n raise TypeError(f\"{graphql_type} is not a named GraphQL Type\")\n graphql_types.append(graphql_type)\n\n self._schema = GraphQLSchema(\n query=query_type,\n mutation=mutation_type,\n subscription=subscription_type if subscription else None,\n directives=specified_directives + graphql_directives,\n types=graphql_types,\n extensions={\n GraphQLCoreConverter.DEFINITION_BACKREF: self,\n },\n )\n\n # attach our schema to the GraphQL schema instance\n self._schema._strawberry_schema = self # type: ignore\n\n # Validate schema early because we want developers to know about\n # possible issues as soon as possible\n errors = validate_schema(self._schema)\n if errors:\n formatted_errors = \"\\n\\n\".join(f\"\u274c {error.message}\" for error in errors)\n raise ValueError(f\"Invalid Schema. 
Errors:\\n\\n{formatted_errors}\")\n\n self.query = self.schema_converter.type_map[query_type.name]\n\n @lru_cache()\n def get_type_by_name( # type: ignore # lru_cache makes mypy complain\n self, name: str\n ) -> Optional[\n Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]\n ]:\n # TODO: respect auto_camel_case\n if name in self.schema_converter.type_map:\n return self.schema_converter.type_map[name].definition\n\n return None\n\n def get_field_for_type(\n self, field_name: str, type_name: str\n ) -> Optional[StrawberryField]:\n type_ = self.get_type_by_name(type_name)\n\n if not type_:\n return None # pragma: no cover\n\n assert isinstance(type_, TypeDefinition)\n\n return next(\n (\n field\n for field in type_.fields\n if self.config.name_converter.get_graphql_name(field) == field_name\n ),\n None,\n )\n\n @lru_cache()\n def get_directive_by_name(self, graphql_name: str) -> Optional[StrawberryDirective]:\n return next(\n (\n directive\n for directive in self.directives\n if self.config.name_converter.from_directive(directive) == graphql_name\n ),\n None,\n )\n\n async def execute(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n allowed_operation_types: Optional[Iterable[OperationType]] = None,\n ) -> ExecutionResult:\n if allowed_operation_types is None:\n allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES\n\n # Create execution context\n execution_context = ExecutionContext(\n query=query,\n schema=self,\n context=context_value,\n root_value=root_value,\n variables=variable_values,\n provided_operation_name=operation_name,\n )\n\n result = await execute(\n self._schema,\n query,\n extensions=list(self.extensions) + [DirectivesExtension],\n execution_context_class=self.execution_context_class,\n execution_context=execution_context,\n allowed_operation_types=allowed_operation_types,\n )\n\n if result.errors:\n self.process_errors(result.errors, execution_context=execution_context)\n\n return result\n\n def execute_sync(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n allowed_operation_types: Optional[Iterable[OperationType]] = None,\n ) -> ExecutionResult:\n if allowed_operation_types is None:\n allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES\n\n execution_context = ExecutionContext(\n query=query,\n schema=self,\n context=context_value,\n root_value=root_value,\n variables=variable_values,\n provided_operation_name=operation_name,\n )\n\n result = execute_sync(\n self._schema,\n query,\n extensions=list(self.extensions) + [DirectivesExtensionSync],\n execution_context_class=self.execution_context_class,\n execution_context=execution_context,\n allowed_operation_types=allowed_operation_types,\n )\n\n if result.errors:\n self.process_errors(result.errors, execution_context=execution_context)\n\n return result\n\n async def subscribe(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n ):\n return await subscribe(\n self._schema,\n parse(query),\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n\n def as_str(self) -> str:\n return print_schema(self)\n\n 
__str__ = as_str\n\n def introspect(self) -> Dict[str, Any]:\n \"\"\"Return the introspection query result for the current schema\n\n Raises:\n ValueError: If the introspection query fails due to an invalid schema\n \"\"\"\n introspection = self.execute_sync(get_introspection_query())\n if introspection.errors or not introspection.data:\n raise ValueError(f\"Invalid Schema. Errors {introspection.errors!r}\")\n\n return introspection.data\n", "path": "strawberry/schema/schema.py"}]} | 3,794 | 388 |
gh_patches_debug_5322 | rasdani/github-patches | git_diff | numpy__numpy-16730 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
f2py undefined symbol with PGI fortran compiler and MPI calls
<!-- Please describe the issue in detail here, and fill in the fields below -->
I am trying to wrap fortran code with f2py. The fortran code includes some MPI calls and is compiled with the PGI compiler.
When importing the wrapped module in python I get this error: `undefined symbol: mpi_comm_rank_`
### Reproducing code example:
<!-- A short code example that reproduces the problem/missing feature. It should be
self-contained, i.e., possible to run as-is via 'python myproblem.py' -->
```
! file: helloworld.f90
subroutine sayhello(comm)
    use mpi
    implicit none
    integer :: comm, rank, size, ierr
    call MPI_Comm_size(comm, size, ierr)
    call MPI_Comm_rank(comm, rank, ierr)
    print *, 'Hello, World! I am process ',rank,' of ',size,'.'
end subroutine sayhello
```
### Compilation
`CC=gcc f2py --verbose --f90exec=mpif90 --fcompiler=pg -c helloworld.f90 -m helloworld`
### Partial output
```
[...]
<class 'numpy.distutils.fcompiler.pg.PGroupFCompiler'>
version_cmd = ['***/pgi/linuxpower/19.9/bin/pgfortran', '-V']
compiler_f77 = ['***/pgi/linuxpower/19.9/bin/pgfortran', '-fpic', '-Minform=inform', '-Mnosecond_underscore', '-fast']
compiler_fix = ['***/spectrum_mpi/10.03.00.01rtm3-rh7_20190611/bin/mpif90', '-Mfixed', '-fpic', '-Minform=inform', '-Mnosecond_underscore', '-fast']
compiler_f90 = ['***/spectrum_mpi/10.03.00.01rtm3-rh7_20190611/bin/mpif90', '-fpic', '-Minform=inform', '-Mnosecond_underscore', '-fast']
linker_so = ['***/pgi/linuxpower/19.9/bin/pgfortran', '-shared', '-fpic']
archiver = None
ranlib = None
linker_exe = None
version = LooseVersion ('19.9-0')
libraries = []
library_dirs = []
object_switch = '-o '
compile_switch = '-c'
include_dirs = ['***/python_venv/python_numpy_pgi_reproducer/include', '***/anaconda3/2019.03/include/python3.7m']
[...]
***/pgi/linuxpower/19.9/bin/pgfortran -shared -fpic /tmp/tmpfy731xy8/tmp/tmpfy731xy8/src.linux-ppc64le-3.7/helloworldmodule.o /tmp/tmpfy731xy8/tmp/tmpfy731xy8/src.linux-ppc64le-3.7/fortranobject.o /tmp/tmpfy731xy8/helloworld.o -o ./helloworld.cpython-37m-powerpc64le-linux-gnu.so
Removing build directory /tmp/tmpfy731xy8
```
The "linker_so" executable is chosen to be the pgfortran executable, whereas I would like to use the mpif90 executable instead.
This leads to the following error when importing the wrapped module from python:
```
$ python -c "import helloworld"
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: ***/helloworld.cpython-37m-powerpc64le-linux-gnu.so: undefined symbol: mpi_comm_rank_
```
### System information
```
$ mpif90 --version
pgf90 19.9-0 linuxpower target on Linuxpower
```
```
$ python --version
Python 3.7.3
```
```
$ python -c "import sys, numpy; print(numpy.__version__, sys.version)"
1.18.4 3.7.3 (default, Mar 27 2019, 22:31:02)
[GCC 7.3.0]
```
### Workaround
This solves the problem in my case:
In file numpy/distutils/fcompiler/pg.py - line 36 (numpy installed version)
```
-'linker_so': ["pgfortran"],
+'linker_so': ["<F90>"],
```
The executable supplied with the `--f90exec` option is then the one picked to be the linker executable and the ImportError does not occur.
</issue>
<code>
[start of numpy/distutils/fcompiler/pg.py]
1 # http://www.pgroup.com
2 import sys
3
4 from numpy.distutils.fcompiler import FCompiler
5 from sys import platform
6 from os.path import join, dirname, normpath
7
8 compilers = ['PGroupFCompiler', 'PGroupFlangCompiler']
9
10
11 class PGroupFCompiler(FCompiler):
12
13 compiler_type = 'pg'
14 description = 'Portland Group Fortran Compiler'
15 version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*'
16
17 if platform == 'darwin':
18 executables = {
19 'version_cmd': ["<F77>", "-V"],
20 'compiler_f77': ["pgfortran", "-dynamiclib"],
21 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"],
22 'compiler_f90': ["pgfortran", "-dynamiclib"],
23 'linker_so': ["libtool"],
24 'archiver': ["ar", "-cr"],
25 'ranlib': ["ranlib"]
26 }
27 pic_flags = ['']
28 else:
29 executables = {
30 'version_cmd': ["<F77>", "-V"],
31 'compiler_f77': ["pgfortran"],
32 'compiler_fix': ["pgfortran", "-Mfixed"],
33 'compiler_f90': ["pgfortran"],
34 'linker_so': ["pgfortran"],
35 'archiver': ["ar", "-cr"],
36 'ranlib': ["ranlib"]
37 }
38 pic_flags = ['-fpic']
39
40 module_dir_switch = '-module '
41 module_include_switch = '-I'
42
43 def get_flags(self):
44 opt = ['-Minform=inform', '-Mnosecond_underscore']
45 return self.pic_flags + opt
46
47 def get_flags_opt(self):
48 return ['-fast']
49
50 def get_flags_debug(self):
51 return ['-g']
52
53 if platform == 'darwin':
54 def get_flags_linker_so(self):
55 return ["-dynamic", '-undefined', 'dynamic_lookup']
56
57 else:
58 def get_flags_linker_so(self):
59 return ["-shared", '-fpic']
60
61 def runtime_library_dir_option(self, dir):
62 return '-R%s' % dir
63
64
65 import functools
66
67 class PGroupFlangCompiler(FCompiler):
68 compiler_type = 'flang'
69 description = 'Portland Group Fortran LLVM Compiler'
70 version_pattern = r'\s*(flang|clang) version (?P<version>[\d.-]+).*'
71
72 ar_exe = 'lib.exe'
73 possible_executables = ['flang']
74
75 executables = {
76 'version_cmd': ["<F77>", "--version"],
77 'compiler_f77': ["flang"],
78 'compiler_fix': ["flang"],
79 'compiler_f90': ["flang"],
80 'linker_so': [None],
81 'archiver': [ar_exe, "/verbose", "/OUT:"],
82 'ranlib': None
83 }
84
85 library_switch = '/OUT:' # No space after /OUT:!
86 module_dir_switch = '-module ' # Don't remove ending space!
87
88 def get_libraries(self):
89 opt = FCompiler.get_libraries(self)
90 opt.extend(['flang', 'flangrti', 'ompstub'])
91 return opt
92
93 @functools.lru_cache(maxsize=128)
94 def get_library_dirs(self):
95 """List of compiler library directories."""
96 opt = FCompiler.get_library_dirs(self)
97 flang_dir = dirname(self.executables['compiler_f77'][0])
98 opt.append(normpath(join(flang_dir, '..', 'lib')))
99
100 return opt
101
102 def get_flags(self):
103 return []
104
105 def get_flags_free(self):
106 return []
107
108 def get_flags_debug(self):
109 return ['-g']
110
111 def get_flags_opt(self):
112 return ['-O3']
113
114 def get_flags_arch(self):
115 return []
116
117 def runtime_library_dir_option(self, dir):
118 raise NotImplementedError
119
120
121 if __name__ == '__main__':
122 from distutils import log
123 log.set_verbosity(2)
124 from numpy.distutils import customized_fcompiler
125 if 'flang' in sys.argv:
126 print(customized_fcompiler(compiler='flang').get_version())
127 else:
128 print(customized_fcompiler(compiler='pg').get_version())
129
[end of numpy/distutils/fcompiler/pg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py
--- a/numpy/distutils/fcompiler/pg.py
+++ b/numpy/distutils/fcompiler/pg.py
@@ -31,7 +31,7 @@
'compiler_f77': ["pgfortran"],
'compiler_fix': ["pgfortran", "-Mfixed"],
'compiler_f90': ["pgfortran"],
- 'linker_so': ["pgfortran"],
+ 'linker_so': ["<F90>"],
'archiver': ["ar", "-cr"],
'ranlib': ["ranlib"]
}
| {"golden_diff": "diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py\n--- a/numpy/distutils/fcompiler/pg.py\n+++ b/numpy/distutils/fcompiler/pg.py\n@@ -31,7 +31,7 @@\n 'compiler_f77': [\"pgfortran\"],\n 'compiler_fix': [\"pgfortran\", \"-Mfixed\"],\n 'compiler_f90': [\"pgfortran\"],\n- 'linker_so': [\"pgfortran\"],\n+ 'linker_so': [\"<F90>\"],\n 'archiver': [\"ar\", \"-cr\"],\n 'ranlib': [\"ranlib\"]\n }\n", "issue": "f2py undefined symbol with PGI fortran compiler and MPI calls\n<!-- Please describe the issue in detail here, and fill in the fields below -->\r\nI am trying to wrap fortran code with f2py. The fortran code includes some MPI calls and is compiled with the PGI compiler.\r\nWhen importing the wrapped module in python I get this error: `undefined symbol: mpi_comm_rank_`\r\n\r\n### Reproducing code example:\r\n\r\n<!-- A short code example that reproduces the problem/missing feature. It should be\r\nself-contained, i.e., possible to run as-is via 'python myproblem.py' -->\r\n\r\n```\r\n1 ! file: helloworld.f90\r\n2 subroutine sayhello(comm)\r\n3 use mpi\r\n4 implicit none\r\n5 integer :: comm, rank, size, ierr\r\n6 call MPI_Comm_size(comm, size, ierr)\r\n7 call MPI_Comm_rank(comm, rank, ierr)\r\n8 print *, 'Hello, World! I am process ',rank,' of ',size,'.'\r\n9 end subroutine sayhello\r\n```\r\n### Compilation\r\n`CC=gcc f2py --verbose --f90exec=mpif90 --fcompiler=pg -c helloworld.f90 -m helloworld`\r\n\r\n### Partial output\r\n```\r\n[...]\r\n<class 'numpy.distutils.fcompiler.pg.PGroupFCompiler'>\r\nversion_cmd = ['***/pgi/linuxpower/19.9/bin/pgfortran', '-V']\r\ncompiler_f77 = ['***/pgi/linuxpower/19.9/bin/pgfortran', '-fpic', '-Minform=inform', '-Mnosecond_underscore', '-fast']\r\ncompiler_fix = ['***/spectrum_mpi/10.03.00.01rtm3-rh7_20190611/bin/mpif90', '-Mfixed', '-fpic', '-Minform=inform', '-Mnosecond_underscore', '-fast']\r\ncompiler_f90 = ['***/spectrum_mpi/10.03.00.01rtm3-rh7_20190611/bin/mpif90', '-fpic', '-Minform=inform', '-Mnosecond_underscore', '-fast']\r\nlinker_so = ['***/pgi/linuxpower/19.9/bin/pgfortran', '-shared', '-fpic']\r\narchiver = None\r\nranlib = None\r\nlinker_exe = None\r\nversion = LooseVersion ('19.9-0')\r\nlibraries = []\r\nlibrary_dirs = []\r\nobject_switch = '-o '\r\ncompile_switch = '-c'\r\ninclude_dirs = ['***/python_venv/python_numpy_pgi_reproducer/include', '***/anaconda3/2019.03/include/python3.7m']\r\n[...]\r\n***/pgi/linuxpower/19.9/bin/pgfortran -shared -fpic /tmp/tmpfy731xy8/tmp/tmpfy731xy8/src.linux-ppc64le-3.7/helloworldmodule.o /tmp/tmpfy731xy8/tmp/tmpfy731xy8/src.linux-ppc64le-3.7/fortranobject.o /tmp/tmpfy731xy8/helloworld.o -o ./helloworld.cpython-37m-powerpc64le-linux-gnu.so\r\nRemoving build directory /tmp/tmpfy731xy8\r\n```\r\nThe \"linker_so\" executable is chosen to be the pgfortran executable, whereas I would like to use the mpif90 executable instead.\r\nThis leads to the following error when importing the wrapped module from python:\r\n\r\n```\r\n$ python -c \"import helloworld\"\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nImportError: ***/helloworld.cpython-37m-powerpc64le-linux-gnu.so: undefined symbol: mpi_comm_rank_\r\n```\r\n\r\n### System informations\r\n\r\n```\r\n$ mpif90 --version\r\npgf90 19.9-0 linuxpower target on Linuxpower\r\n```\r\n\r\n```\r\n$ python --version\r\nPython 3.7.3\r\n```\r\n```\r\n$ python -c \"import sys, numpy; print(numpy.__version__, sys.version)\"\r\n1.18.4 3.7.3 (default, Mar 27 2019, 22:31:02)\r\n[GCC 7.3.0]\r\n```\r\n### 
Workarround\r\nThis solves the problem in my case:\r\nIn file numpy/distutils/fcompiler/pg.py - line 36 (numpy installed version)\r\n```\r\n-'linker_so': [\"pgfortran\"],\r\n+'linker_so': [\"<F90>\"],\r\n```\r\nThe executable supplied with the `--f90exec` option is then the one picked to be the linker executable and the ImportError does not occur.\r\n\n", "before_files": [{"content": "# http://www.pgroup.com\nimport sys\n\nfrom numpy.distutils.fcompiler import FCompiler\nfrom sys import platform\nfrom os.path import join, dirname, normpath\n\ncompilers = ['PGroupFCompiler', 'PGroupFlangCompiler']\n\n\nclass PGroupFCompiler(FCompiler):\n\n compiler_type = 'pg'\n description = 'Portland Group Fortran Compiler'\n version_pattern = r'\\s*pg(f77|f90|hpf|fortran) (?P<version>[\\d.-]+).*'\n\n if platform == 'darwin':\n executables = {\n 'version_cmd': [\"<F77>\", \"-V\"],\n 'compiler_f77': [\"pgfortran\", \"-dynamiclib\"],\n 'compiler_fix': [\"pgfortran\", \"-Mfixed\", \"-dynamiclib\"],\n 'compiler_f90': [\"pgfortran\", \"-dynamiclib\"],\n 'linker_so': [\"libtool\"],\n 'archiver': [\"ar\", \"-cr\"],\n 'ranlib': [\"ranlib\"]\n }\n pic_flags = ['']\n else:\n executables = {\n 'version_cmd': [\"<F77>\", \"-V\"],\n 'compiler_f77': [\"pgfortran\"],\n 'compiler_fix': [\"pgfortran\", \"-Mfixed\"],\n 'compiler_f90': [\"pgfortran\"],\n 'linker_so': [\"pgfortran\"],\n 'archiver': [\"ar\", \"-cr\"],\n 'ranlib': [\"ranlib\"]\n }\n pic_flags = ['-fpic']\n\n module_dir_switch = '-module '\n module_include_switch = '-I'\n\n def get_flags(self):\n opt = ['-Minform=inform', '-Mnosecond_underscore']\n return self.pic_flags + opt\n\n def get_flags_opt(self):\n return ['-fast']\n\n def get_flags_debug(self):\n return ['-g']\n\n if platform == 'darwin':\n def get_flags_linker_so(self):\n return [\"-dynamic\", '-undefined', 'dynamic_lookup']\n\n else:\n def get_flags_linker_so(self):\n return [\"-shared\", '-fpic']\n\n def runtime_library_dir_option(self, dir):\n return '-R%s' % dir\n\n\nimport functools\n\nclass PGroupFlangCompiler(FCompiler):\n compiler_type = 'flang'\n description = 'Portland Group Fortran LLVM Compiler'\n version_pattern = r'\\s*(flang|clang) version (?P<version>[\\d.-]+).*'\n\n ar_exe = 'lib.exe'\n possible_executables = ['flang']\n\n executables = {\n 'version_cmd': [\"<F77>\", \"--version\"],\n 'compiler_f77': [\"flang\"],\n 'compiler_fix': [\"flang\"],\n 'compiler_f90': [\"flang\"],\n 'linker_so': [None],\n 'archiver': [ar_exe, \"/verbose\", \"/OUT:\"],\n 'ranlib': None\n }\n\n library_switch = '/OUT:' # No space after /OUT:!\n module_dir_switch = '-module ' # Don't remove ending space!\n\n def get_libraries(self):\n opt = FCompiler.get_libraries(self)\n opt.extend(['flang', 'flangrti', 'ompstub'])\n return opt\n\n @functools.lru_cache(maxsize=128)\n def get_library_dirs(self):\n \"\"\"List of compiler library directories.\"\"\"\n opt = FCompiler.get_library_dirs(self)\n flang_dir = dirname(self.executables['compiler_f77'][0])\n opt.append(normpath(join(flang_dir, '..', 'lib')))\n\n return opt\n\n def get_flags(self):\n return []\n\n def get_flags_free(self):\n return []\n\n def get_flags_debug(self):\n return ['-g']\n\n def get_flags_opt(self):\n return ['-O3']\n\n def get_flags_arch(self):\n return []\n\n def runtime_library_dir_option(self, dir):\n raise NotImplementedError\n\n\nif __name__ == '__main__':\n from distutils import log\n log.set_verbosity(2)\n from numpy.distutils import customized_fcompiler\n if 'flang' in sys.argv:\n 
print(customized_fcompiler(compiler='flang').get_version())\n else:\n print(customized_fcompiler(compiler='pg').get_version())\n", "path": "numpy/distutils/fcompiler/pg.py"}]} | 2,822 | 142 |
gh_patches_debug_38735 | rasdani/github-patches | git_diff | sktime__sktime-2925 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ENH] VAR and VECM models
@fkiraly update: almost done, all that is missing is `predict_interval` for VECM.
---
@fkiraly's summary of the discussion:
We should interface VAR and VECM from `statsmodels`.
- [x] VAR #1083, #1491
- [x] VECM #1141, #2829
---
Have you considered porting `Vector Autoregressions (VAR)` and `Vector Error Correction Models (VECM)` from [statsmodels](https://www.statsmodels.org/dev/vector_ar.html#)? This would be a great addition. Thank you
</issue>
<code>
[start of sktime/forecasting/vecm.py]
1 # -*- coding: utf-8 -*-
2 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
3
4 """VECM Forecaster."""
5
6 __all__ = ["VECM"]
7 __author__ = ["thayeylolu", "AurumnPegasus"]
8
9 import numpy as np
10 import pandas as pd
11 from statsmodels.tsa.vector_ar.vecm import VECM as _VECM
12
13 from sktime.forecasting.base.adapters import _StatsModelsAdapter
14
15
16 class VECM(_StatsModelsAdapter):
17 r"""
18 A VECM model, or Vector Error Correction Model, is a restricted.
19
20 VAR model used for nonstationary series that are cointegrated.r
21
22 Parameters
23 ----------
24 dates : array_like of datetime, optional
25 See :class:`statsmodels.tsa.base.tsa_model.TimeSeriesModel` for more
26 information.
27 freq : str, optional
28 See :class:`statsmodels.tsa.base.tsa_model.TimeSeriesModel` for more
29 information.
30 missing : str, optional
31 See :class:`statsmodels.base.model.Model` for more information.
32 k_ar_diff : int
33 Number of lagged differences in the model. Equals :math:`k_{ar} - 1` in
34 the formula above.
35 coint_rank : int
36 Cointegration rank, equals the rank of the matrix :math:`\\Pi` and the
37 number of columns of :math:`\\alpha` and :math:`\\beta`.
38 deterministic : str {``"n"``, ``"co"``, ``"ci"``, ``"lo"``, ``"li"``}
39 * ``"n"`` - no deterministic terms
40 * ``"co"`` - constant outside the cointegration relation
41 * ``"ci"`` - constant within the cointegration relation
42 * ``"lo"`` - linear trend outside the cointegration relation
43 * ``"li"`` - linear trend within the cointegration relation
44
45 Combinations of these are possible (e.g. ``"cili"`` or ``"colo"`` for
46 linear trend with intercept). When using a constant term you have to
47 choose whether you want to restrict it to the cointegration relation
48 (i.e. ``"ci"``) or leave it unrestricted (i.e. ``"co"``). Do not use
49 both ``"ci"`` and ``"co"``. The same applies for ``"li"`` and ``"lo"``
50 when using a linear term. See the Notes-section for more information.
51 seasons : int, default: 0
52 Number of periods in a seasonal cycle. 0 means no seasons.
53 first_season : int, default: 0
54 Season of the first observation.
55 method : str {"ml"}, default: "ml"
56 Estimation method to use. "ml" stands for Maximum Likelihood.
57 exog_coint : a scalar (float), 1D ndarray of size nobs,
58 2D ndarray/pd.DataFrame of size (any, neqs)
59 Deterministic terms inside the cointegration relation.
60 exog_coint_fc : a scalar (float), 1D ndarray of size nobs,
61 2D ndarray/pd.DataFrame of size (any, neqs)
62 Forcasted value of exog_coint
63
64 Example
65 -------
66 >>> from sktime.forecasting.vecm import VECM
67 >>> from sktime.forecasting.model_selection import temporal_train_test_split
68 >>> from sktime.forecasting.base import ForecastingHorizon
69 >>> index = pd.date_range(start="2005", end="2006-12", freq="M")
70 >>> df = pd.DataFrame(np.random.randint(0, 100, size=(23, 2)),
71 ... columns=list("AB"),
72 ... index=pd.PeriodIndex(index))
73 >>> train, test = temporal_train_test_split(df)
74 >>> sktime_model = VECM()
75 >>> fh = ForecastingHorizon([1, 3, 4, 5, 7, 9])
76 >>> _ = sktime_model.fit(train, fh=fh)
77 >>> fc2 = sktime_model.predict(fh=fh)
78 """
79
80 _tags = {
81 "scitype:y": "multivariate",
82 "y_inner_mtype": "pd.DataFrame",
83 "X_inner_mtype": "pd.DataFrame",
84 "requires-fh-in-fit": False,
85 "univariate-only": False,
86 "ignores-exogeneous-X": False,
87 }
88
89 def __init__(
90 self,
91 dates=None,
92 freq=None,
93 missing="none",
94 k_ar_diff=1,
95 coint_rank=1,
96 deterministic="n",
97 seasons=0,
98 first_season=0,
99 method="ml",
100 exog_coint=None,
101 exog_coint_fc=None,
102 ):
103
104 self.dates = dates
105 self.freq = freq
106 self.missing = missing
107 self.k_ar_diff = k_ar_diff
108 self.coint_rank = coint_rank
109 self.deterministic = deterministic
110 self.seasons = seasons
111 self.first_season = first_season
112 self.method = method
113 self.exog_coint = exog_coint
114 self.exog_coint_fc = exog_coint_fc
115
116 super(VECM, self).__init__()
117
118 def _fit(self, y, fh=None, X=None):
119 """
120 Fit forecaster to training data.
121
122 Wrapper for statsmodel's VECM (_VECM) fit method
123
124 Parameters
125 ----------
126 y : pd.DataFrame, guaranteed to have 2 or more columns
127 Time series to which to fit the forecaster.
128 fh : guaranteed to be ForecastingHorizon
129 The forecasting horizon with the steps ahead to to predict.
130 Required (non-optional) here if self.get_tag("requires-fh-in-fit")==True
131 Otherwise, if not passed in _fit, guaranteed to be passed in _predict
132 X : pd.DataFrame, optional (default=None)
133 Exogeneous time series to fit to.
134
135 Returns
136 -------
137 self : reference to self
138 """
139 self._forecaster = _VECM(
140 endog=y,
141 exog=X,
142 exog_coint=self.exog_coint,
143 dates=self.dates,
144 freq=self.freq,
145 missing=self.missing,
146 k_ar_diff=self.k_ar_diff,
147 coint_rank=self.coint_rank,
148 deterministic=self.deterministic,
149 seasons=self.seasons,
150 first_season=self.first_season,
151 )
152
153 self._fitted_forecaster = self._forecaster.fit(method=self.method)
154 return self
155
156 def _predict(self, fh, X=None):
157 """
158 Forecast time series at future horizon.
159
160 Wrapper for statsmodel's VECM (_VECM) predict method
161
162 Parameters
163 ----------
164 fh : guaranteed to be ForecastingHorizon
165 The forecasting horizon with the steps ahead to to predict.
166 If not passed in _fit, guaranteed to be passed here
167 X : optional (default=None)
168 guaranteed to be of a type in self.get_tag("X_inner_mtype")
169 Exogeneous time series for the forecast
170
171 Returns
172 -------
173 y_pred : pd.Series
174 Point predictions
175 """
176 y_pred_outsample = None
177 y_pred_insample = None
178 exog_fc = X.values if X is not None else None
179 fh_int = fh.to_relative(self.cutoff)
180
181 # out-sample prediction
182 if fh_int.max() > 0:
183 y_pred_outsample = self._fitted_forecaster.predict(
184 steps=fh_int[-1],
185 exog_fc=exog_fc,
186 exog_coint_fc=self.exog_coint_fc,
187 )
188
189 # in-sample prediction by means of residuals
190 if fh_int.min() <= 0:
191
192 # .resid returns np.ndarray
193 # both values need to be pd DataFrame for subtraction
194 y_pred_insample = self._y - pd.DataFrame(self._fitted_forecaster.resid)
195 y_pred_insample = y_pred_insample.values
196
197 if y_pred_insample is not None and y_pred_outsample is not None:
198 y_pred = np.concatenate([y_pred_outsample, y_pred_insample], axis=0)
199 else:
200 y_pred = (
201 y_pred_insample if y_pred_insample is not None else y_pred_outsample
202 )
203
204 index = fh.to_absolute(self.cutoff)
205 index.name = self._y.index.name
206 y_pred = pd.DataFrame(
207 y_pred[fh.to_indexer(self.cutoff), :],
208 index=fh.to_absolute(self.cutoff),
209 columns=self._y.columns,
210 )
211
212 return y_pred
213
[end of sktime/forecasting/vecm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sktime/forecasting/vecm.py b/sktime/forecasting/vecm.py
--- a/sktime/forecasting/vecm.py
+++ b/sktime/forecasting/vecm.py
@@ -3,6 +3,7 @@
"""VECM Forecaster."""
+
__all__ = ["VECM"]
__author__ = ["thayeylolu", "AurumnPegasus"]
@@ -84,6 +85,7 @@
"requires-fh-in-fit": False,
"univariate-only": False,
"ignores-exogeneous-X": False,
+ "capability:pred_int": True,
}
def __init__(
@@ -210,3 +212,70 @@
)
return y_pred
+
+ def _predict_interval(self, fh, X=None, coverage=None):
+ """
+ Compute/return prediction quantiles for a forecast.
+
+ private _predict_interval containing the core logic,
+ called from predict_interval and possibly predict_quantiles
+ State required:
+ Requires state to be "fitted".
+ Accesses in self:
+ Fitted model attributes ending in "_"
+ self.cutoff
+
+ Parameters
+ ----------
+ fh : guaranteed to be ForecastingHorizon
+ The forecasting horizon with the steps ahead to to predict.
+ X : optional (default=None)
+ guaranteed to be of a type in self.get_tag("X_inner_mtype")
+ Exogeneous time series for the forecast
+ coverage : list of float (guaranteed not None and floats in [0,1] interval)
+ nominal coverage(s) of predictive interval(s)
+
+ Returns
+ -------
+ pred_int : pd.DataFrame
+ Column has multi-index: first level is variable name from y in fit,
+ second level coverage fractions for which intervals were computed.
+ in the same order as in input `coverage`.
+ Third level is string "lower" or "upper", for lower/upper interval end.
+ Row index is fh, with additional (upper) levels equal to instance levels,
+ from y seen in fit, if y_inner_mtype is Panel or Hierarchical.
+ Entries are forecasts of lower/upper interval end,
+ for var in col index, at nominal coverage in second col index,
+ lower/upper depending on third col index, for the row index.
+ Upper/lower interval end forecasts are equivalent to
+ quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.
+ """
+ exog_fc = X.values if X is not None else None
+ fh_oos = fh.to_out_of_sample(self.cutoff)
+ var_names = (
+ self._y.index.name
+ if self._y.index.name is not None
+ else self._y.columns.values
+ )
+ int_idx = pd.MultiIndex.from_product([var_names, coverage, ["lower", "upper"]])
+ # pred_int = pd.DataFrame(index=int_idx)
+
+ for c in coverage:
+ alpha = 1 - c
+ _, y_lower, y_upper = self._fitted_forecaster.predict(
+ steps=fh_oos[-1],
+ exog_fc=exog_fc,
+ exog_coint_fc=self.exog_coint_fc,
+ alpha=alpha,
+ )
+ values = []
+ for v_idx in range(len(var_names)):
+ values.append(y_lower[0][v_idx])
+ values.append(y_upper[0][v_idx])
+ # pred_int.loc[(var_names[v_idx], c, "lower"), :] = (y_lower[0][v_idx])
+ # pred_int.loc[(var_names[v_idx], c, "upper"), :] = (y_upper[0][v_idx])
+ pred_int = pd.DataFrame(
+ [values], index=fh.to_absolute(self.cutoff), columns=int_idx
+ )
+
+ return pred_int
| {"golden_diff": "diff --git a/sktime/forecasting/vecm.py b/sktime/forecasting/vecm.py\n--- a/sktime/forecasting/vecm.py\n+++ b/sktime/forecasting/vecm.py\n@@ -3,6 +3,7 @@\n \n \"\"\"VECM Forecaster.\"\"\"\n \n+\n __all__ = [\"VECM\"]\n __author__ = [\"thayeylolu\", \"AurumnPegasus\"]\n \n@@ -84,6 +85,7 @@\n \"requires-fh-in-fit\": False,\n \"univariate-only\": False,\n \"ignores-exogeneous-X\": False,\n+ \"capability:pred_int\": True,\n }\n \n def __init__(\n@@ -210,3 +212,70 @@\n )\n \n return y_pred\n+\n+ def _predict_interval(self, fh, X=None, coverage=None):\n+ \"\"\"\n+ Compute/return prediction quantiles for a forecast.\n+\n+ private _predict_interval containing the core logic,\n+ called from predict_interval and possibly predict_quantiles\n+ State required:\n+ Requires state to be \"fitted\".\n+ Accesses in self:\n+ Fitted model attributes ending in \"_\"\n+ self.cutoff\n+\n+ Parameters\n+ ----------\n+ fh : guaranteed to be ForecastingHorizon\n+ The forecasting horizon with the steps ahead to to predict.\n+ X : optional (default=None)\n+ guaranteed to be of a type in self.get_tag(\"X_inner_mtype\")\n+ Exogeneous time series for the forecast\n+ coverage : list of float (guaranteed not None and floats in [0,1] interval)\n+ nominal coverage(s) of predictive interval(s)\n+\n+ Returns\n+ -------\n+ pred_int : pd.DataFrame\n+ Column has multi-index: first level is variable name from y in fit,\n+ second level coverage fractions for which intervals were computed.\n+ in the same order as in input `coverage`.\n+ Third level is string \"lower\" or \"upper\", for lower/upper interval end.\n+ Row index is fh, with additional (upper) levels equal to instance levels,\n+ from y seen in fit, if y_inner_mtype is Panel or Hierarchical.\n+ Entries are forecasts of lower/upper interval end,\n+ for var in col index, at nominal coverage in second col index,\n+ lower/upper depending on third col index, for the row index.\n+ Upper/lower interval end forecasts are equivalent to\n+ quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.\n+ \"\"\"\n+ exog_fc = X.values if X is not None else None\n+ fh_oos = fh.to_out_of_sample(self.cutoff)\n+ var_names = (\n+ self._y.index.name\n+ if self._y.index.name is not None\n+ else self._y.columns.values\n+ )\n+ int_idx = pd.MultiIndex.from_product([var_names, coverage, [\"lower\", \"upper\"]])\n+ # pred_int = pd.DataFrame(index=int_idx)\n+\n+ for c in coverage:\n+ alpha = 1 - c\n+ _, y_lower, y_upper = self._fitted_forecaster.predict(\n+ steps=fh_oos[-1],\n+ exog_fc=exog_fc,\n+ exog_coint_fc=self.exog_coint_fc,\n+ alpha=alpha,\n+ )\n+ values = []\n+ for v_idx in range(len(var_names)):\n+ values.append(y_lower[0][v_idx])\n+ values.append(y_upper[0][v_idx])\n+ # pred_int.loc[(var_names[v_idx], c, \"lower\"), :] = (y_lower[0][v_idx])\n+ # pred_int.loc[(var_names[v_idx], c, \"upper\"), :] = (y_upper[0][v_idx])\n+ pred_int = pd.DataFrame(\n+ [values], index=fh.to_absolute(self.cutoff), columns=int_idx\n+ )\n+\n+ return pred_int\n", "issue": "[ENH] VAR and VECM models\n@fkiraly update: almost done, all that is missing is `predict_interval` for VECM.\r\n\r\n---\r\n\r\n@fkiraly's summary of the discussion:\r\n\r\nWe should interface VAR and VECM from `statsmodels`.\r\n\r\n- [x] VAR #1083, #1491\r\n- [x] VECM #1141, #2829\r\n\r\n---\r\n\r\nHave you considered porting `Vector Autoregressions (VAR)` and `Vector Error Correction Models (VECM)` from [statsmodels](https://www.statsmodels.org/dev/vector_ar.html#)? This would be a great addition. 
Thank you\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n\"\"\"VECM Forecaster.\"\"\"\n\n__all__ = [\"VECM\"]\n__author__ = [\"thayeylolu\", \"AurumnPegasus\"]\n\nimport numpy as np\nimport pandas as pd\nfrom statsmodels.tsa.vector_ar.vecm import VECM as _VECM\n\nfrom sktime.forecasting.base.adapters import _StatsModelsAdapter\n\n\nclass VECM(_StatsModelsAdapter):\n r\"\"\"\n A VECM model, or Vector Error Correction Model, is a restricted.\n\n VAR model used for nonstationary series that are cointegrated.r\n\n Parameters\n ----------\n dates : array_like of datetime, optional\n See :class:`statsmodels.tsa.base.tsa_model.TimeSeriesModel` for more\n information.\n freq : str, optional\n See :class:`statsmodels.tsa.base.tsa_model.TimeSeriesModel` for more\n information.\n missing : str, optional\n See :class:`statsmodels.base.model.Model` for more information.\n k_ar_diff : int\n Number of lagged differences in the model. Equals :math:`k_{ar} - 1` in\n the formula above.\n coint_rank : int\n Cointegration rank, equals the rank of the matrix :math:`\\\\Pi` and the\n number of columns of :math:`\\\\alpha` and :math:`\\\\beta`.\n deterministic : str {``\"n\"``, ``\"co\"``, ``\"ci\"``, ``\"lo\"``, ``\"li\"``}\n * ``\"n\"`` - no deterministic terms\n * ``\"co\"`` - constant outside the cointegration relation\n * ``\"ci\"`` - constant within the cointegration relation\n * ``\"lo\"`` - linear trend outside the cointegration relation\n * ``\"li\"`` - linear trend within the cointegration relation\n\n Combinations of these are possible (e.g. ``\"cili\"`` or ``\"colo\"`` for\n linear trend with intercept). When using a constant term you have to\n choose whether you want to restrict it to the cointegration relation\n (i.e. ``\"ci\"``) or leave it unrestricted (i.e. ``\"co\"``). Do not use\n both ``\"ci\"`` and ``\"co\"``. The same applies for ``\"li\"`` and ``\"lo\"``\n when using a linear term. See the Notes-section for more information.\n seasons : int, default: 0\n Number of periods in a seasonal cycle. 0 means no seasons.\n first_season : int, default: 0\n Season of the first observation.\n method : str {\"ml\"}, default: \"ml\"\n Estimation method to use. \"ml\" stands for Maximum Likelihood.\n exog_coint : a scalar (float), 1D ndarray of size nobs,\n 2D ndarray/pd.DataFrame of size (any, neqs)\n Deterministic terms inside the cointegration relation.\n exog_coint_fc : a scalar (float), 1D ndarray of size nobs,\n 2D ndarray/pd.DataFrame of size (any, neqs)\n Forcasted value of exog_coint\n\n Example\n -------\n >>> from sktime.forecasting.vecm import VECM\n >>> from sktime.forecasting.model_selection import temporal_train_test_split\n >>> from sktime.forecasting.base import ForecastingHorizon\n >>> index = pd.date_range(start=\"2005\", end=\"2006-12\", freq=\"M\")\n >>> df = pd.DataFrame(np.random.randint(0, 100, size=(23, 2)),\n ... columns=list(\"AB\"),\n ... 
index=pd.PeriodIndex(index))\n >>> train, test = temporal_train_test_split(df)\n >>> sktime_model = VECM()\n >>> fh = ForecastingHorizon([1, 3, 4, 5, 7, 9])\n >>> _ = sktime_model.fit(train, fh=fh)\n >>> fc2 = sktime_model.predict(fh=fh)\n \"\"\"\n\n _tags = {\n \"scitype:y\": \"multivariate\",\n \"y_inner_mtype\": \"pd.DataFrame\",\n \"X_inner_mtype\": \"pd.DataFrame\",\n \"requires-fh-in-fit\": False,\n \"univariate-only\": False,\n \"ignores-exogeneous-X\": False,\n }\n\n def __init__(\n self,\n dates=None,\n freq=None,\n missing=\"none\",\n k_ar_diff=1,\n coint_rank=1,\n deterministic=\"n\",\n seasons=0,\n first_season=0,\n method=\"ml\",\n exog_coint=None,\n exog_coint_fc=None,\n ):\n\n self.dates = dates\n self.freq = freq\n self.missing = missing\n self.k_ar_diff = k_ar_diff\n self.coint_rank = coint_rank\n self.deterministic = deterministic\n self.seasons = seasons\n self.first_season = first_season\n self.method = method\n self.exog_coint = exog_coint\n self.exog_coint_fc = exog_coint_fc\n\n super(VECM, self).__init__()\n\n def _fit(self, y, fh=None, X=None):\n \"\"\"\n Fit forecaster to training data.\n\n Wrapper for statsmodel's VECM (_VECM) fit method\n\n Parameters\n ----------\n y : pd.DataFrame, guaranteed to have 2 or more columns\n Time series to which to fit the forecaster.\n fh : guaranteed to be ForecastingHorizon\n The forecasting horizon with the steps ahead to to predict.\n Required (non-optional) here if self.get_tag(\"requires-fh-in-fit\")==True\n Otherwise, if not passed in _fit, guaranteed to be passed in _predict\n X : pd.DataFrame, optional (default=None)\n Exogeneous time series to fit to.\n\n Returns\n -------\n self : reference to self\n \"\"\"\n self._forecaster = _VECM(\n endog=y,\n exog=X,\n exog_coint=self.exog_coint,\n dates=self.dates,\n freq=self.freq,\n missing=self.missing,\n k_ar_diff=self.k_ar_diff,\n coint_rank=self.coint_rank,\n deterministic=self.deterministic,\n seasons=self.seasons,\n first_season=self.first_season,\n )\n\n self._fitted_forecaster = self._forecaster.fit(method=self.method)\n return self\n\n def _predict(self, fh, X=None):\n \"\"\"\n Forecast time series at future horizon.\n\n Wrapper for statsmodel's VECM (_VECM) predict method\n\n Parameters\n ----------\n fh : guaranteed to be ForecastingHorizon\n The forecasting horizon with the steps ahead to to predict.\n If not passed in _fit, guaranteed to be passed here\n X : optional (default=None)\n guaranteed to be of a type in self.get_tag(\"X_inner_mtype\")\n Exogeneous time series for the forecast\n\n Returns\n -------\n y_pred : pd.Series\n Point predictions\n \"\"\"\n y_pred_outsample = None\n y_pred_insample = None\n exog_fc = X.values if X is not None else None\n fh_int = fh.to_relative(self.cutoff)\n\n # out-sample prediction\n if fh_int.max() > 0:\n y_pred_outsample = self._fitted_forecaster.predict(\n steps=fh_int[-1],\n exog_fc=exog_fc,\n exog_coint_fc=self.exog_coint_fc,\n )\n\n # in-sample prediction by means of residuals\n if fh_int.min() <= 0:\n\n # .resid returns np.ndarray\n # both values need to be pd DataFrame for subtraction\n y_pred_insample = self._y - pd.DataFrame(self._fitted_forecaster.resid)\n y_pred_insample = y_pred_insample.values\n\n if y_pred_insample is not None and y_pred_outsample is not None:\n y_pred = np.concatenate([y_pred_outsample, y_pred_insample], axis=0)\n else:\n y_pred = (\n y_pred_insample if y_pred_insample is not None else y_pred_outsample\n )\n\n index = fh.to_absolute(self.cutoff)\n index.name = self._y.index.name\n y_pred = 
pd.DataFrame(\n y_pred[fh.to_indexer(self.cutoff), :],\n index=fh.to_absolute(self.cutoff),\n columns=self._y.columns,\n )\n\n return y_pred\n", "path": "sktime/forecasting/vecm.py"}]} | 3,128 | 893 |
gh_patches_debug_29329 | rasdani/github-patches | git_diff | streamlink__streamlink-2326 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Streamlink no longer provides streamlink-script.py
As of version 0.14.0, streamlink does not install the streamlink-script.py script;
because of this, streamlink-twitch-gui does not work, as it cannot load the script.
This may be an issue in streamlink, as there is no mention of its removal in the changelog.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import codecs
3 from os import environ
4 from os import path
5 from sys import path as sys_path
6
7 from setuptools import setup, find_packages
8
9 import versioneer
10
11 deps = [
12 # Require backport of concurrent.futures on Python 2
13 'futures;python_version<"3.0"',
14 # Require singledispatch on Python <3.4
15 'singledispatch;python_version<"3.4"',
16 "requests>=2.21.0,<3.0",
17 'urllib3[secure]>=1.23;python_version<"3.0"',
18 "isodate",
19 "websocket-client",
20 # Support for SOCKS proxies
21 "PySocks!=1.5.7,>=1.5.6",
22 # win-inet-pton is missing a dependency in PySocks, this has been fixed but not released yet
23 # Required due to missing socket.inet_ntop & socket.inet_pton method in Windows Python 2.x
24 'win-inet-pton;python_version<"3.0" and platform_system=="Windows"',
25 # shutil.get_terminal_size and which were added in Python 3.3
26 'backports.shutil_which;python_version<"3.3"',
27 'backports.shutil_get_terminal_size;python_version<"3.3"'
28 ]
29
30 # for encrypted streams
31 if environ.get("STREAMLINK_USE_PYCRYPTO"):
32 deps.append("pycrypto")
33 else:
34 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
35 deps.append("pycryptodome>=3.4.3,<4")
36
37 # for localization
38 if environ.get("STREAMLINK_USE_PYCOUNTRY"):
39 deps.append("pycountry")
40 else:
41 deps.append("iso-639")
42 deps.append("iso3166")
43
44 # When we build an egg for the Win32 bootstrap we don"t want dependency
45 # information built into it.
46 if environ.get("NO_DEPS"):
47 deps = []
48
49 this_directory = path.abspath(path.dirname(__file__))
50 srcdir = path.join(this_directory, "src/")
51 sys_path.insert(0, srcdir)
52
53 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f:
54 long_description = f.read()
55
56 setup(name="streamlink",
57 version=versioneer.get_version(),
58 cmdclass=versioneer.get_cmdclass(),
59 description="Streamlink is command-line utility that extracts streams "
60 "from various services and pipes them into a video player of "
61 "choice.",
62 long_description=long_description,
63 long_description_content_type="text/markdown",
64 url="https://github.com/streamlink/streamlink",
65 project_urls={
66 "Documentation": "https://streamlink.github.io/",
67 "Tracker": "https://github.com/streamlink/streamlink/issues",
68 "Source": "https://github.com/streamlink/streamlink",
69 "Funding": "https://opencollective.com/streamlink"
70 },
71 author="Streamlink",
72 # temp until we have a mailing list / global email
73 author_email="[email protected]",
74 license="Simplified BSD",
75 packages=find_packages("src"),
76 package_dir={"": "src"},
77 entry_points={
78 "console_scripts": ["streamlink=streamlink_cli.main:main"]
79 },
80 install_requires=deps,
81 test_suite="tests",
82 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
83 classifiers=["Development Status :: 5 - Production/Stable",
84 "License :: OSI Approved :: BSD License",
85 "Environment :: Console",
86 "Intended Audience :: End Users/Desktop",
87 "Operating System :: POSIX",
88 "Operating System :: Microsoft :: Windows",
89 "Operating System :: MacOS",
90 "Programming Language :: Python :: 2.7",
91 "Programming Language :: Python :: 3.4",
92 "Programming Language :: Python :: 3.5",
93 "Programming Language :: Python :: 3.6",
94 "Programming Language :: Python :: 3.7",
95 "Topic :: Internet :: WWW/HTTP",
96 "Topic :: Multimedia :: Sound/Audio",
97 "Topic :: Multimedia :: Video",
98 "Topic :: Utilities"])
99
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,7 @@
#!/usr/bin/env python
import codecs
-from os import environ
-from os import path
-from sys import path as sys_path
+from os import environ, path
+from sys import argv, path as sys_path
from setuptools import setup, find_packages
@@ -53,6 +52,27 @@
with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f:
long_description = f.read()
+
+def is_wheel_for_windows():
+ if "bdist_wheel" in argv:
+ names = ["win32", "win-amd64", "cygwin"]
+ length = len(argv)
+ for pos in range(argv.index("bdist_wheel") + 1, length):
+ if argv[pos] == "--plat-name" and pos + 1 < length:
+ return argv[pos + 1] in names
+ elif argv[pos][:12] == "--plat-name=":
+ return argv[pos][12:] in names
+ return False
+
+
+entry_points = {
+ "console_scripts": ["streamlink=streamlink_cli.main:main"]
+}
+
+if is_wheel_for_windows():
+ entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
+
+
setup(name="streamlink",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
@@ -74,9 +94,7 @@
license="Simplified BSD",
packages=find_packages("src"),
package_dir={"": "src"},
- entry_points={
- "console_scripts": ["streamlink=streamlink_cli.main:main"]
- },
+ entry_points=entry_points,
install_requires=deps,
test_suite="tests",
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,8 +1,7 @@\n #!/usr/bin/env python\n import codecs\n-from os import environ\n-from os import path\n-from sys import path as sys_path\n+from os import environ, path\n+from sys import argv, path as sys_path\n \n from setuptools import setup, find_packages\n \n@@ -53,6 +52,27 @@\n with codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n \n+\n+def is_wheel_for_windows():\n+ if \"bdist_wheel\" in argv:\n+ names = [\"win32\", \"win-amd64\", \"cygwin\"]\n+ length = len(argv)\n+ for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n+ if argv[pos] == \"--plat-name\" and pos + 1 < length:\n+ return argv[pos + 1] in names\n+ elif argv[pos][:12] == \"--plat-name=\":\n+ return argv[pos][12:] in names\n+ return False\n+\n+\n+entry_points = {\n+ \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n+}\n+\n+if is_wheel_for_windows():\n+ entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n+\n+\n setup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n@@ -74,9 +94,7 @@\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n- entry_points={\n- \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n- },\n+ entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n", "issue": "Streamlink no longer provides streamlink-script.py\nAs of version 0.14.0 streamlink does not install the streamlink-script.py script,\r\nbecause of this streamlink-twitch-gui does not work as it cannot load the script.\r\nThis may an issue in streamlink, as there is no mention of its removal in the changelog.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ\nfrom os import path\nfrom sys import path as sys_path\n\nfrom setuptools import setup, find_packages\n\nimport versioneer\n\ndeps = [\n # Require backport of concurrent.futures on Python 2\n 'futures;python_version<\"3.0\"',\n # Require singledispatch on Python <3.4\n 'singledispatch;python_version<\"3.4\"',\n \"requests>=2.21.0,<3.0\",\n 'urllib3[secure]>=1.23;python_version<\"3.0\"',\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n # win-inet-pton is missing a dependency in PySocks, this has been fixed but not released yet\n # Required due to missing socket.inet_ntop & socket.inet_pton method in Windows Python 2.x\n 'win-inet-pton;python_version<\"3.0\" and platform_system==\"Windows\"',\n # shutil.get_terminal_size and which were added in Python 3.3\n 'backports.shutil_which;python_version<\"3.3\"',\n 'backports.shutil_get_terminal_size;python_version<\"3.3\"'\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, 
\"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points={\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n },\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]} | 1,728 | 449 |
gh_patches_debug_5514 | rasdani/github-patches | git_diff | streamlink__streamlink-5480 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unexpected FFmpeg version output while running ['/usr/bin/ffmpeg', '-version']
### Checklist
- [X] This is a bug report and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed bug reports](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22bug%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
FFmpeg binary not validated.
Streamlink version 6.0.0.0 Linux Appimage x86_64
Got this error message:
[stream.ffmpegmux][error] Could not validate FFmpeg!
[stream.ffmpegmux][error] Unexpected FFmpeg version output while running ['/usr/bin/ffmpeg', '-version']
[stream.ffmpegmux][warning] No valid FFmpeg binary was found. See the --ffmpeg-ffmpeg option.
[stream.ffmpegmux][warning] Muxing streams is unsupported! Only a subset of the available streams can be returned!
ffmpeg output:
ffmpeg -version
ffmpeg version 6.0-static https://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2023 the FFmpeg developers
built with gcc 8 (Debian 8.3.0-6)
ffmpeg is in /usr/bin
### Debug log
```text
[utils.l10n][debug] Language code: en_US
[stream.ffmpegmux][error] Could not validate FFmpeg!
[stream.ffmpegmux][error] Unexpected FFmpeg version output while running ['/usr/bin/ffmpeg', '-version']
[stream.ffmpegmux][warning] No valid FFmpeg binary was found. See the --ffmpeg-ffmpeg option.
[stream.ffmpegmux][warning] Muxing streams is unsupported! Only a subset of the available streams can be returned!
[cli][info] Available streams: 234p_alt (worst), 234p, 360p_alt2, 360p_alt, 360p, 540p, 720p_alt, 720p, 1080p (best)
[cli][info] Opening stream: 720p (hls)
[cli][info] Starting player: mpv
[stream.hls][debug] Reloading playlist
[cli][debug] Pre-buffering 8192 bytes
[stream.hls][debug] Segments in this playlist are encrypted
[stream.hls][debug] First Sequence: 22258880; Last Sequence: 22259722
[stream.hls][debug] Start offset: 0; Duration: None; Start Sequence: 22259720; End Sequence: None
[stream.hls][debug] Adding segment 22259720 to queue
[stream.hls][debug] Adding segment 22259721 to queue
[stream.hls][debug] Adding segment 22259722 to queue
[stream.hls][debug] Writing segment 22259720 to output
[stream.hls][debug] Segment 22259720 complete
```
</issue>
<code>
[start of src/streamlink/utils/processoutput.py]
1 import asyncio
2 from contextlib import suppress
3 from typing import Callable, List, Optional
4
5
6 class ProcessOutput:
7 def __init__(self, command: List[str], timeout: Optional[float] = None):
8 self.command = command
9 self.timeout = timeout
10
11 def run(self) -> bool: # pragma: no cover
12 return asyncio.run(self._run())
13
14 async def _run(self) -> bool:
15 loop = asyncio.get_event_loop()
16 done: asyncio.Future[bool] = loop.create_future()
17 process = await asyncio.create_subprocess_exec(
18 *self.command,
19 stdin=None,
20 stdout=asyncio.subprocess.PIPE,
21 stderr=asyncio.subprocess.PIPE,
22 )
23 if not process.stdout or not process.stderr: # pragma: no cover
24 return False
25
26 async def ontimeout():
27 if self.timeout:
28 await asyncio.sleep(self.timeout)
29 done.set_result(False)
30
31 async def onexit():
32 code = await process.wait()
33 done.set_result(self.onexit(code))
34
35 async def onoutput(callback: Callable[[int, str], Optional[bool]], streamreader: asyncio.StreamReader):
36 line: bytes
37 idx = 0
38 async for line in streamreader:
39 try:
40 result = callback(idx, line.decode().rstrip())
41 except Exception as err:
42 done.set_exception(err)
43 break
44 if result is not None:
45 done.set_result(bool(result))
46 break
47 idx += 1
48
49 tasks = (
50 loop.create_task(ontimeout()),
51 loop.create_task(onexit()),
52 loop.create_task(onoutput(self.onstdout, process.stdout)),
53 loop.create_task(onoutput(self.onstderr, process.stderr)),
54 )
55
56 try:
57 return await done
58 finally:
59 for task in tasks:
60 task.cancel()
61 with suppress(OSError):
62 process.kill()
63
64 def onexit(self, code: int) -> bool:
65 return code == 0
66
67 def onstdout(self, idx: int, line: str) -> Optional[bool]: # pragma: no cover
68 pass
69
70 def onstderr(self, idx: int, line: str) -> Optional[bool]: # pragma: no cover
71 pass
72
[end of src/streamlink/utils/processoutput.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/utils/processoutput.py b/src/streamlink/utils/processoutput.py
--- a/src/streamlink/utils/processoutput.py
+++ b/src/streamlink/utils/processoutput.py
@@ -48,9 +48,9 @@
tasks = (
loop.create_task(ontimeout()),
- loop.create_task(onexit()),
loop.create_task(onoutput(self.onstdout, process.stdout)),
loop.create_task(onoutput(self.onstderr, process.stderr)),
+ loop.create_task(onexit()),
)
try:
| {"golden_diff": "diff --git a/src/streamlink/utils/processoutput.py b/src/streamlink/utils/processoutput.py\n--- a/src/streamlink/utils/processoutput.py\n+++ b/src/streamlink/utils/processoutput.py\n@@ -48,9 +48,9 @@\n \n tasks = (\n loop.create_task(ontimeout()),\n- loop.create_task(onexit()),\n loop.create_task(onoutput(self.onstdout, process.stdout)),\n loop.create_task(onoutput(self.onstderr, process.stderr)),\n+ loop.create_task(onexit()),\n )\n \n try:\n", "issue": "Unexpected FFmpeg version output while running ['/usr/bin/ffmpeg', '-version']\n### Checklist\n\n- [X] This is a bug report and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed bug reports](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22bug%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nFFmpeg binary not validated.\r\nStreamlink version 6.0.0.0 Linux Appimage x86_64\r\nGot this error message:\r\n\r\n[stream.ffmpegmux][error] Could not validate FFmpeg!\r\n[stream.ffmpegmux][error] Unexpected FFmpeg version output while running ['/usr/bin/ffmpeg', '-version']\r\n[stream.ffmpegmux][warning] No valid FFmpeg binary was found. See the --ffmpeg-ffmpeg option.\r\n[stream.ffmpegmux][warning] Muxing streams is unsupported! Only a subset of the available streams can be returned!\r\n\r\nffmpeg output:\r\nffmpeg -version\r\nffmpeg version 6.0-static https://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2023 the FFmpeg developers\r\nbuilt with gcc 8 (Debian 8.3.0-6)\r\n\r\nffmpeg is in /usr/bin\n\n### Debug log\n\n```text\n[utils.l10n][debug] Language code: en_US\r\n[stream.ffmpegmux][error] Could not validate FFmpeg!\r\n[stream.ffmpegmux][error] Unexpected FFmpeg version output while running ['/usr/bin/ffmpeg', '-version']\r\n[stream.ffmpegmux][warning] No valid FFmpeg binary was found. See the --ffmpeg-ffmpeg option.\r\n[stream.ffmpegmux][warning] Muxing streams is unsupported! 
Only a subset of the available streams can be returned!\r\n[cli][info] Available streams: 234p_alt (worst), 234p, 360p_alt2, 360p_alt, 360p, 540p, 720p_alt, 720p, 1080p (best)\r\n[cli][info] Opening stream: 720p (hls)\r\n[cli][info] Starting player: mpv\r\n[stream.hls][debug] Reloading playlist\r\n[cli][debug] Pre-buffering 8192 bytes\r\n[stream.hls][debug] Segments in this playlist are encrypted\r\n[stream.hls][debug] First Sequence: 22258880; Last Sequence: 22259722\r\n[stream.hls][debug] Start offset: 0; Duration: None; Start Sequence: 22259720; End Sequence: None\r\n[stream.hls][debug] Adding segment 22259720 to queue\r\n[stream.hls][debug] Adding segment 22259721 to queue\r\n[stream.hls][debug] Adding segment 22259722 to queue\r\n[stream.hls][debug] Writing segment 22259720 to output\r\n[stream.hls][debug] Segment 22259720 complete\n```\n\nUnexpected FFmpeg version output while running ['/usr/bin/ffmpeg', '-version']\n### Checklist\n\n- [X] This is a bug report and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed bug reports](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22bug%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nFFmpeg binary not validated.\r\nStreamlink version 6.0.0.0 Linux Appimage x86_64\r\nGot this error message:\r\n\r\n[stream.ffmpegmux][error] Could not validate FFmpeg!\r\n[stream.ffmpegmux][error] Unexpected FFmpeg version output while running ['/usr/bin/ffmpeg', '-version']\r\n[stream.ffmpegmux][warning] No valid FFmpeg binary was found. See the --ffmpeg-ffmpeg option.\r\n[stream.ffmpegmux][warning] Muxing streams is unsupported! Only a subset of the available streams can be returned!\r\n\r\nffmpeg output:\r\nffmpeg -version\r\nffmpeg version 6.0-static https://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2023 the FFmpeg developers\r\nbuilt with gcc 8 (Debian 8.3.0-6)\r\n\r\nffmpeg is in /usr/bin\n\n### Debug log\n\n```text\n[utils.l10n][debug] Language code: en_US\r\n[stream.ffmpegmux][error] Could not validate FFmpeg!\r\n[stream.ffmpegmux][error] Unexpected FFmpeg version output while running ['/usr/bin/ffmpeg', '-version']\r\n[stream.ffmpegmux][warning] No valid FFmpeg binary was found. See the --ffmpeg-ffmpeg option.\r\n[stream.ffmpegmux][warning] Muxing streams is unsupported! 
Only a subset of the available streams can be returned!\r\n[cli][info] Available streams: 234p_alt (worst), 234p, 360p_alt2, 360p_alt, 360p, 540p, 720p_alt, 720p, 1080p (best)\r\n[cli][info] Opening stream: 720p (hls)\r\n[cli][info] Starting player: mpv\r\n[stream.hls][debug] Reloading playlist\r\n[cli][debug] Pre-buffering 8192 bytes\r\n[stream.hls][debug] Segments in this playlist are encrypted\r\n[stream.hls][debug] First Sequence: 22258880; Last Sequence: 22259722\r\n[stream.hls][debug] Start offset: 0; Duration: None; Start Sequence: 22259720; End Sequence: None\r\n[stream.hls][debug] Adding segment 22259720 to queue\r\n[stream.hls][debug] Adding segment 22259721 to queue\r\n[stream.hls][debug] Adding segment 22259722 to queue\r\n[stream.hls][debug] Writing segment 22259720 to output\r\n[stream.hls][debug] Segment 22259720 complete\n```\n\n", "before_files": [{"content": "import asyncio\nfrom contextlib import suppress\nfrom typing import Callable, List, Optional\n\n\nclass ProcessOutput:\n def __init__(self, command: List[str], timeout: Optional[float] = None):\n self.command = command\n self.timeout = timeout\n\n def run(self) -> bool: # pragma: no cover\n return asyncio.run(self._run())\n\n async def _run(self) -> bool:\n loop = asyncio.get_event_loop()\n done: asyncio.Future[bool] = loop.create_future()\n process = await asyncio.create_subprocess_exec(\n *self.command,\n stdin=None,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n if not process.stdout or not process.stderr: # pragma: no cover\n return False\n\n async def ontimeout():\n if self.timeout:\n await asyncio.sleep(self.timeout)\n done.set_result(False)\n\n async def onexit():\n code = await process.wait()\n done.set_result(self.onexit(code))\n\n async def onoutput(callback: Callable[[int, str], Optional[bool]], streamreader: asyncio.StreamReader):\n line: bytes\n idx = 0\n async for line in streamreader:\n try:\n result = callback(idx, line.decode().rstrip())\n except Exception as err:\n done.set_exception(err)\n break\n if result is not None:\n done.set_result(bool(result))\n break\n idx += 1\n\n tasks = (\n loop.create_task(ontimeout()),\n loop.create_task(onexit()),\n loop.create_task(onoutput(self.onstdout, process.stdout)),\n loop.create_task(onoutput(self.onstderr, process.stderr)),\n )\n\n try:\n return await done\n finally:\n for task in tasks:\n task.cancel()\n with suppress(OSError):\n process.kill()\n\n def onexit(self, code: int) -> bool:\n return code == 0\n\n def onstdout(self, idx: int, line: str) -> Optional[bool]: # pragma: no cover\n pass\n\n def onstderr(self, idx: int, line: str) -> Optional[bool]: # pragma: no cover\n pass\n", "path": "src/streamlink/utils/processoutput.py"}]} | 2,698 | 114 |
gh_patches_debug_23364 | rasdani/github-patches | git_diff | getredash__redash-2931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Google Spreadsheets: support for open by url
We currently use [gspread](https://github.com/burnash/gspread)'s [`open_by_key`](https://gspread.readthedocs.io/en/latest/#gspread.Client.open_by_key) method to load spreadsheets. But it also has the [`open_by_url`](https://gspread.readthedocs.io/en/latest/#gspread.Client.open_by_url) method.
Using the latter, we can add support for just providing the spreadsheet URL, which will be much more friendly to end users.
To remain backward compatible, we can check whether the query starts with `http...`: if it does, use the open-by-URL method; otherwise, use the open-by-key one.
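A minimal sketch of that dispatch (an editorial illustration, not the project's final API; it assumes an already-authorized gspread client and that the query's key field holds either a key or a full URL):

```python
def open_spreadsheet(gspread_client, key_or_url):
    """Open by URL when the query looks like a link, otherwise by key."""
    # gspread exposes both entry points; only the lookup method changes.
    if key_or_url.startswith(('http://', 'https://')):
        return gspread_client.open_by_url(key_or_url)
    return gspread_client.open_by_key(key_or_url)
```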
</issue>
<code>
[start of redash/query_runner/google_spreadsheets.py]
1 import logging
2 from base64 import b64decode
3
4 from dateutil import parser
5 from requests import Session
6 from xlsxwriter.utility import xl_col_to_name
7
8 from redash.query_runner import *
9 from redash.utils import json_dumps, json_loads
10
11 logger = logging.getLogger(__name__)
12
13 try:
14 import gspread
15 from gspread.httpsession import HTTPSession
16 from oauth2client.service_account import ServiceAccountCredentials
17
18 enabled = True
19 except ImportError:
20 enabled = False
21
22
23 def _load_key(filename):
24 with open(filename, "rb") as f:
25 return json_loads(f.read())
26
27
28 def _get_columns_and_column_names(row):
29 column_names = []
30 columns = []
31 duplicate_counter = 1
32
33 for i, column_name in enumerate(row):
34 if not column_name:
35 column_name = 'column_{}'.format(xl_col_to_name(i))
36
37 if column_name in column_names:
38 column_name = u"{}{}".format(column_name, duplicate_counter)
39 duplicate_counter += 1
40
41 column_names.append(column_name)
42 columns.append({
43 'name': column_name,
44 'friendly_name': column_name,
45 'type': TYPE_STRING
46 })
47
48 return columns, column_names
49
50
51 def _guess_type(value):
52 if value == '':
53 return TYPE_STRING
54 try:
55 val = int(value)
56 return TYPE_INTEGER
57 except ValueError:
58 pass
59 try:
60 val = float(value)
61 return TYPE_FLOAT
62 except ValueError:
63 pass
64 if unicode(value).lower() in ('true', 'false'):
65 return TYPE_BOOLEAN
66 try:
67 val = parser.parse(value)
68 return TYPE_DATETIME
69 except (ValueError, OverflowError):
70 pass
71 return TYPE_STRING
72
73
74 def _value_eval_list(row_values, col_types):
75 value_list = []
76 raw_values = zip(col_types, row_values)
77 for typ, rval in raw_values:
78 try:
79 if rval is None or rval == '':
80 val = None
81 elif typ == TYPE_BOOLEAN:
82 val = True if unicode(rval).lower() == 'true' else False
83 elif typ == TYPE_DATETIME:
84 val = parser.parse(rval)
85 elif typ == TYPE_FLOAT:
86 val = float(rval)
87 elif typ == TYPE_INTEGER:
88 val = int(rval)
89 else:
90 # for TYPE_STRING and default
91 val = unicode(rval)
92 value_list.append(val)
93 except (ValueError, OverflowError):
94 value_list.append(rval)
95 return value_list
96
97
98 HEADER_INDEX = 0
99
100
101 class WorksheetNotFoundError(Exception):
102 def __init__(self, worksheet_num, worksheet_count):
103 message = "Worksheet number {} not found. Spreadsheet has {} worksheets. Note that the worksheet count is zero based.".format(worksheet_num, worksheet_count)
104 super(WorksheetNotFoundError, self).__init__(message)
105
106
107 def parse_query(query):
108 values = query.split("|")
109 key = values[0] # key of the spreadsheet
110 worksheet_num = 0 if len(values) != 2 else int(values[1]) # if spreadsheet contains more than one worksheet - this is the number of it
111
112 return key, worksheet_num
113
114
115 def parse_worksheet(worksheet):
116 if not worksheet:
117 return {'columns': [], 'rows': []}
118
119 columns, column_names = _get_columns_and_column_names(worksheet[HEADER_INDEX])
120
121 if len(worksheet) > 1:
122 for j, value in enumerate(worksheet[HEADER_INDEX + 1]):
123 columns[j]['type'] = _guess_type(value)
124
125 column_types = [c['type'] for c in columns]
126 rows = [dict(zip(column_names, _value_eval_list(row, column_types))) for row in worksheet[HEADER_INDEX + 1:]]
127 data = {'columns': columns, 'rows': rows}
128
129 return data
130
131
132 def parse_spreadsheet(spreadsheet, worksheet_num):
133 worksheets = spreadsheet.worksheets()
134 worksheet_count = len(worksheets)
135 if worksheet_num >= worksheet_count:
136 raise WorksheetNotFoundError(worksheet_num, worksheet_count)
137
138 worksheet = worksheets[worksheet_num].get_all_values()
139
140 return parse_worksheet(worksheet)
141
142
143 class TimeoutSession(Session):
144 def request(self, *args, **kwargs):
145 kwargs.setdefault('timeout', 300)
146 return super(TimeoutSession, self).request(*args, **kwargs)
147
148
149 class GoogleSpreadsheet(BaseQueryRunner):
150 @classmethod
151 def annotate_query(cls):
152 return False
153
154 @classmethod
155 def type(cls):
156 return "google_spreadsheets"
157
158 @classmethod
159 def enabled(cls):
160 return enabled
161
162 @classmethod
163 def configuration_schema(cls):
164 return {
165 'type': 'object',
166 'properties': {
167 'jsonKeyFile': {
168 "type": "string",
169 'title': 'JSON Key File'
170 }
171 },
172 'required': ['jsonKeyFile'],
173 'secret': ['jsonKeyFile']
174 }
175
176 def _get_spreadsheet_service(self):
177 scope = [
178 'https://spreadsheets.google.com/feeds',
179 ]
180
181 key = json_loads(b64decode(self.configuration['jsonKeyFile']))
182 creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
183
184 timeout_session = HTTPSession()
185 timeout_session.requests_session = TimeoutSession()
186 spreadsheetservice = gspread.Client(auth=creds, http_session=timeout_session)
187 spreadsheetservice.login()
188 return spreadsheetservice
189
190 def test_connection(self):
191 self._get_spreadsheet_service()
192
193 def run_query(self, query, user):
194 logger.debug("Spreadsheet is about to execute query: %s", query)
195 key, worksheet_num = parse_query(query)
196
197 try:
198 spreadsheet_service = self._get_spreadsheet_service()
199 spreadsheet = spreadsheet_service.open_by_key(key)
200
201 data = parse_spreadsheet(spreadsheet, worksheet_num)
202
203 return json_dumps(data), None
204 except gspread.SpreadsheetNotFound:
205 return None, "Spreadsheet ({}) not found. Make sure you used correct id.".format(key)
206
207
208 register(GoogleSpreadsheet)
209
[end of redash/query_runner/google_spreadsheets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/query_runner/google_spreadsheets.py b/redash/query_runner/google_spreadsheets.py
--- a/redash/query_runner/google_spreadsheets.py
+++ b/redash/query_runner/google_spreadsheets.py
@@ -147,6 +147,7 @@
class GoogleSpreadsheet(BaseQueryRunner):
+
@classmethod
def annotate_query(cls):
return False
@@ -190,13 +191,22 @@
def test_connection(self):
self._get_spreadsheet_service()
+ def is_url_key(self, key):
+ if key.startswith('https://'):
+ return True
+ return False
+
def run_query(self, query, user):
logger.debug("Spreadsheet is about to execute query: %s", query)
key, worksheet_num = parse_query(query)
try:
spreadsheet_service = self._get_spreadsheet_service()
- spreadsheet = spreadsheet_service.open_by_key(key)
+
+ if self.is_url_key(key):
+ spreadsheet = spreadsheet_service.open_by_url(key)
+ else:
+ spreadsheet = spreadsheet_service.open_by_key(key)
data = parse_spreadsheet(spreadsheet, worksheet_num)
| {"golden_diff": "diff --git a/redash/query_runner/google_spreadsheets.py b/redash/query_runner/google_spreadsheets.py\n--- a/redash/query_runner/google_spreadsheets.py\n+++ b/redash/query_runner/google_spreadsheets.py\n@@ -147,6 +147,7 @@\n \n \n class GoogleSpreadsheet(BaseQueryRunner):\n+\n @classmethod\n def annotate_query(cls):\n return False\n@@ -190,13 +191,22 @@\n def test_connection(self):\n self._get_spreadsheet_service()\n \n+ def is_url_key(self, key):\n+ if key.startswith('https://'):\n+ return True\n+ return False\n+\n def run_query(self, query, user):\n logger.debug(\"Spreadsheet is about to execute query: %s\", query)\n key, worksheet_num = parse_query(query)\n \n try:\n spreadsheet_service = self._get_spreadsheet_service()\n- spreadsheet = spreadsheet_service.open_by_key(key)\n+\n+ if self.is_url_key(key):\n+ spreadsheet = spreadsheet_service.open_by_url(key)\n+ else:\n+ spreadsheet = spreadsheet_service.open_by_key(key)\n \n data = parse_spreadsheet(spreadsheet, worksheet_num)\n", "issue": "Google Spreadsheets: support for open by url\nWe currently use [gspread](https://github.com/burnash/gspread)'s [`open_by_key`](https://gspread.readthedocs.io/en/latest/#gspread.Client.open_by_key) method to load spreadsheets. But it also has the [`open_by_url`](https://gspread.readthedocs.io/en/latest/#gspread.Client.open_by_url) method.\r\n\r\nUsing the later we can add support for just providing the spreadsheet URL, which will be much more friendly to end users. \r\n\r\nTo remain backward compatible we can check if the query is of format `http...` -> use the open by url method, otherwise use the open by key one.\nGoogle Spreadsheets: support for open by url\nWe currently use [gspread](https://github.com/burnash/gspread)'s [`open_by_key`](https://gspread.readthedocs.io/en/latest/#gspread.Client.open_by_key) method to load spreadsheets. But it also has the [`open_by_url`](https://gspread.readthedocs.io/en/latest/#gspread.Client.open_by_url) method.\r\n\r\nUsing the later we can add support for just providing the spreadsheet URL, which will be much more friendly to end users. 
\r\n\r\nTo remain backward compatible we can check if the query is of format `http...` -> use the open by url method, otherwise use the open by key one.\n", "before_files": [{"content": "import logging\nfrom base64 import b64decode\n\nfrom dateutil import parser\nfrom requests import Session\nfrom xlsxwriter.utility import xl_col_to_name\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import gspread\n from gspread.httpsession import HTTPSession\n from oauth2client.service_account import ServiceAccountCredentials\n\n enabled = True\nexcept ImportError:\n enabled = False\n\n\ndef _load_key(filename):\n with open(filename, \"rb\") as f:\n return json_loads(f.read())\n\n\ndef _get_columns_and_column_names(row):\n column_names = []\n columns = []\n duplicate_counter = 1\n\n for i, column_name in enumerate(row):\n if not column_name:\n column_name = 'column_{}'.format(xl_col_to_name(i))\n\n if column_name in column_names:\n column_name = u\"{}{}\".format(column_name, duplicate_counter)\n duplicate_counter += 1\n\n column_names.append(column_name)\n columns.append({\n 'name': column_name,\n 'friendly_name': column_name,\n 'type': TYPE_STRING\n })\n\n return columns, column_names\n\n\ndef _guess_type(value):\n if value == '':\n return TYPE_STRING\n try:\n val = int(value)\n return TYPE_INTEGER\n except ValueError:\n pass\n try:\n val = float(value)\n return TYPE_FLOAT\n except ValueError:\n pass\n if unicode(value).lower() in ('true', 'false'):\n return TYPE_BOOLEAN\n try:\n val = parser.parse(value)\n return TYPE_DATETIME\n except (ValueError, OverflowError):\n pass\n return TYPE_STRING\n\n\ndef _value_eval_list(row_values, col_types):\n value_list = []\n raw_values = zip(col_types, row_values)\n for typ, rval in raw_values:\n try:\n if rval is None or rval == '':\n val = None\n elif typ == TYPE_BOOLEAN:\n val = True if unicode(rval).lower() == 'true' else False\n elif typ == TYPE_DATETIME:\n val = parser.parse(rval)\n elif typ == TYPE_FLOAT:\n val = float(rval)\n elif typ == TYPE_INTEGER:\n val = int(rval)\n else:\n # for TYPE_STRING and default\n val = unicode(rval)\n value_list.append(val)\n except (ValueError, OverflowError):\n value_list.append(rval)\n return value_list\n\n\nHEADER_INDEX = 0\n\n\nclass WorksheetNotFoundError(Exception):\n def __init__(self, worksheet_num, worksheet_count):\n message = \"Worksheet number {} not found. Spreadsheet has {} worksheets. 
Note that the worksheet count is zero based.\".format(worksheet_num, worksheet_count)\n super(WorksheetNotFoundError, self).__init__(message)\n\n\ndef parse_query(query):\n values = query.split(\"|\")\n key = values[0] # key of the spreadsheet\n worksheet_num = 0 if len(values) != 2 else int(values[1]) # if spreadsheet contains more than one worksheet - this is the number of it\n\n return key, worksheet_num\n\n\ndef parse_worksheet(worksheet):\n if not worksheet:\n return {'columns': [], 'rows': []}\n\n columns, column_names = _get_columns_and_column_names(worksheet[HEADER_INDEX])\n\n if len(worksheet) > 1:\n for j, value in enumerate(worksheet[HEADER_INDEX + 1]):\n columns[j]['type'] = _guess_type(value)\n\n column_types = [c['type'] for c in columns]\n rows = [dict(zip(column_names, _value_eval_list(row, column_types))) for row in worksheet[HEADER_INDEX + 1:]]\n data = {'columns': columns, 'rows': rows}\n\n return data\n\n\ndef parse_spreadsheet(spreadsheet, worksheet_num):\n worksheets = spreadsheet.worksheets()\n worksheet_count = len(worksheets)\n if worksheet_num >= worksheet_count:\n raise WorksheetNotFoundError(worksheet_num, worksheet_count)\n\n worksheet = worksheets[worksheet_num].get_all_values()\n\n return parse_worksheet(worksheet)\n\n\nclass TimeoutSession(Session):\n def request(self, *args, **kwargs):\n kwargs.setdefault('timeout', 300)\n return super(TimeoutSession, self).request(*args, **kwargs)\n\n\nclass GoogleSpreadsheet(BaseQueryRunner):\n @classmethod\n def annotate_query(cls):\n return False\n\n @classmethod\n def type(cls):\n return \"google_spreadsheets\"\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {\n 'jsonKeyFile': {\n \"type\": \"string\",\n 'title': 'JSON Key File'\n }\n },\n 'required': ['jsonKeyFile'],\n 'secret': ['jsonKeyFile']\n }\n\n def _get_spreadsheet_service(self):\n scope = [\n 'https://spreadsheets.google.com/feeds',\n ]\n\n key = json_loads(b64decode(self.configuration['jsonKeyFile']))\n creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)\n\n timeout_session = HTTPSession()\n timeout_session.requests_session = TimeoutSession()\n spreadsheetservice = gspread.Client(auth=creds, http_session=timeout_session)\n spreadsheetservice.login()\n return spreadsheetservice\n\n def test_connection(self):\n self._get_spreadsheet_service()\n\n def run_query(self, query, user):\n logger.debug(\"Spreadsheet is about to execute query: %s\", query)\n key, worksheet_num = parse_query(query)\n\n try:\n spreadsheet_service = self._get_spreadsheet_service()\n spreadsheet = spreadsheet_service.open_by_key(key)\n\n data = parse_spreadsheet(spreadsheet, worksheet_num)\n\n return json_dumps(data), None\n except gspread.SpreadsheetNotFound:\n return None, \"Spreadsheet ({}) not found. Make sure you used correct id.\".format(key)\n\n\nregister(GoogleSpreadsheet)\n", "path": "redash/query_runner/google_spreadsheets.py"}]} | 2,695 | 263 |
gh_patches_debug_4033 | rasdani/github-patches | git_diff | psychopy__psychopy-3176 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
autocomplete is raising an error on mac standalone
My guess is that jedi needs to be packaged differently so that it isn't "frozen"
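A hedged sketch of what "packaged differently" could mean for the py2app build in the setupApp.py below: list jedi together with its parser dependency parso in the `packages` option, so both are copied as real directories rather than frozen into the zipped site-packages (the entries and comments here are editorial assumptions):

```python
# Illustrative fragment of the py2app configuration; only the two entries
# relevant to the autocomplete error are shown.
packages = [
    # ... the rest of the existing package list ...
    'jedi',   # completion backend used by the code editor
    'parso',  # jedi's runtime dependency; ships grammar data files
]
```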
</issue>
<code>
[start of setupApp.py]
1 #!/usr/bin/env python
2 ################
3 # see notes at bottom for requirements
4 from __future__ import absolute_import, print_function
5 import glob
6 import os
7 import sys
8 from sys import platform
9 from distutils.core import setup
10 from pkg_resources import parse_version
11 # import versioneer
12 import psychopy
13 version = psychopy.__version__
14
15 # regenerate __init__.py only if we're in the source repos (not in a zip file)
16 try:
17 import createInitFile # won't exist in a sdist.zip
18 writeNewInit=True
19 except:
20 writeNewInit=False
21 if writeNewInit:
22 vStr = createInitFile.createInitFile(dist='bdist')
23
24 #define the extensions to compile if necess
25 packageData = []
26 requires = []
27
28 if platform != 'darwin':
29 raise RuntimeError("setupApp.py is only for building Mac Standalone bundle")
30
31 import bdist_mpkg
32 import py2app
33 resources = glob.glob('psychopy/app/Resources/*')
34 resources.append('/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7/pyconfig.h')
35 frameworks = ["/usr/lib/libxml2.2.dylib", #"libyaml.dylib",
36 "libevent.dylib", "libffi.dylib",
37 "libmp3lame.0.dylib",
38 "/usr/local/Cellar/glfw/3.2.1/lib/libglfw.3.2.dylib",
39 ]
40 opencvLibs = glob.glob(os.path.join(sys.exec_prefix, 'lib', 'libopencv*.2.4.dylib'))
41 frameworks.extend(opencvLibs)
42
43 import macholib
44 #print("~"*60 + "macholib version: "+macholib.__version__)
45
46 if parse_version(macholib.__version__) <= parse_version('1.7'):
47 print("Applying macholib patch...")
48 import macholib.dyld
49 import macholib.MachOGraph
50 dyld_find_1_7 = macholib.dyld.dyld_find
51 def dyld_find(name, loader=None, **kwargs):
52 #print("~"*60 + "calling alternate dyld_find")
53 if loader is not None:
54 kwargs['loader_path'] = loader
55 return dyld_find_1_7(name, **kwargs)
56 macholib.MachOGraph.dyld_find = dyld_find
57
58 includes = ['Tkinter', 'tkFileDialog',
59 'imp', 'subprocess', 'shlex',
60 'shelve', # for scipy.io
61 '_elementtree', 'pyexpat', # for openpyxl
62 'hid',
63 'pyo', 'greenlet', 'zmq', 'tornado',
64 'psutil', # for iohub
65 'tobii_research', # need tobii_research file and tobiiresearch pkg
66 'pysoundcard', 'soundfile', 'sounddevice', 'readline',
67 'hid',
68 'xlwt', # writes excel files for pandas
69 'vlc', # install with pip install python-vlc
70 'msgpack_numpy',
71 'configparser',
72 ]
73 packages = ['wx', 'psychopy',
74 'pyglet', 'pytz', 'OpenGL', 'glfw',
75 'scipy', 'matplotlib', 'lxml', 'xml', 'openpyxl',
76 'moviepy', 'imageio', 'imageio_ffmpeg',
77 '_sounddevice_data', '_soundfile_data',
78 'cffi', 'pycparser',
79 'PIL', # 'Image',
80 'objc', 'Quartz', 'AppKit', 'QTKit', 'Cocoa',
81 'Foundation', 'CoreFoundation',
82 'pkg_resources', # needed for objc
83 'pyolib', 'pyo',
84 'requests', 'certifi', 'cryptography',
85 'pyosf',
86 # for unit testing
87 'coverage',
88 # handy external science libs
89 'serial',
90 'egi', 'pylink', 'tobiiresearch',
91 'pyxid', 'pyxid2', 'ftd2xx', # ftd2xx is used by cedrus
92 'pandas', 'tables', # 'cython',
93 'msgpack', 'yaml', 'gevent', # for ioHub
94 # these aren't needed, but liked
95 'psychopy_ext', 'pyfilesec',
96 'bidi', 'arabic_reshaper', # for right-left language conversions
97 # for Py3 compatibility
98 'future', 'past', 'lib2to3',
99 'json_tricks', # allows saving arrays/dates in json
100 'git', 'gitlab',
101 'astunparse', 'esprima', # for translating/adapting py/JS
102 'pylsl', 'pygaze', 'smite',
103 'cv2',
104 'badapted', 'darc_toolbox', # adaptive methods from Ben Vincent
105 'questplus',
106 'metapensiero.pj', 'dukpy', 'macropy',
107 'jedi',
108 'psychtoolbox',
109 'freetype', 'h5py',
110 ]
111
112 if sys.version_info.major >= 3:
113 packages.extend(['PyQt5'])
114 else:
115 # not available or not working under Python3:
116 includes.extend(['UserString', 'ioLabs', 'FileDialog'])
117 packages.extend(['PyQt4', 'labjack', 'rusocsci'])
118 # is available but py2app can't seem to find it:
119 packages.extend(['OpenGL'])
120
121 setup(
122 app=['psychopy/app/psychopyApp.py'],
123 options=dict(py2app=dict(
124 includes=includes,
125 packages=packages,
126 excludes=['bsddb', 'jinja2', 'IPython','ipython_genutils','nbconvert',
127 'libsz.2.dylib', 'pygame',
128 # 'stringprep',
129 'functools32',
130 ], # anything we need to forcibly exclude?
131 resources=resources,
132 argv_emulation=True,
133 site_packages=True,
134 frameworks=frameworks,
135 iconfile='psychopy/app/Resources/psychopy.icns',
136 plist=dict(
137 CFBundleIconFile='psychopy.icns',
138 CFBundleName = "PsychoPy",
139 CFBundleShortVersionString = version, # must be in X.X.X format
140 CFBundleVersion = version,
141 CFBundleExecutable = "PsychoPy",
142 CFBundleIdentifier = "org.opensciencetools.psychopy",
143 CFBundleLicense = "GNU GPLv3+",
144 NSHumanReadableCopyright = "Open Science Tools Limited",
145 CFBundleDocumentTypes=[dict(CFBundleTypeExtensions=['*'],
146 CFBundleTypeRole='Editor')],
147 LSEnvironment=dict(PATH="/usr/local/git/bin:/usr/local/bin:"
148 "/usr/local:/usr/bin:/usr/sbin"),
149 ),
150 )) # end of the options dict
151 )
152
153
154 # ugly hack for opencv2:
155 # As of opencv 2.4.5 the cv2.so binary used rpath to a fixed
156 # location to find libs and even more annoyingly it then appended
157 # 'lib' to the rpath as well. These were fine for the packaged
158 # framework python but the libs in an app bundle are different.
159 # So, create symlinks so they appear in the same place as in framework python
160 rpath = "dist/PsychoPy.app/Contents/Resources/"
161 for libPath in opencvLibs:
162 libname = os.path.split(libPath)[-1]
163 realPath = "../../Frameworks/"+libname # relative path (w.r.t. the fake)
164 fakePath = os.path.join(rpath, "lib", libname)
165 os.symlink(realPath, fakePath)
166 # they even did this for Python lib itself, which is in diff location
167 realPath = "../Frameworks/Python.framework/Python" # relative to the fake path
168 fakePath = os.path.join(rpath, "Python")
169 os.symlink(realPath, fakePath)
170
171 if writeNewInit:
172 # remove unwanted info about this system post-build
173 createInitFile.createInitFile(dist=None)
174
[end of setupApp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setupApp.py b/setupApp.py
--- a/setupApp.py
+++ b/setupApp.py
@@ -104,7 +104,7 @@
'badapted', 'darc_toolbox', # adaptive methods from Ben Vincent
'questplus',
'metapensiero.pj', 'dukpy', 'macropy',
- 'jedi',
+ 'jedi','parso',
'psychtoolbox',
'freetype', 'h5py',
]
| {"golden_diff": "diff --git a/setupApp.py b/setupApp.py\n--- a/setupApp.py\n+++ b/setupApp.py\n@@ -104,7 +104,7 @@\n 'badapted', 'darc_toolbox', # adaptive methods from Ben Vincent\n 'questplus',\n 'metapensiero.pj', 'dukpy', 'macropy',\n- 'jedi',\n+ 'jedi','parso',\n 'psychtoolbox',\n 'freetype', 'h5py',\n ]\n", "issue": "autocomplete is raising an error on mac standalone\nMy guess is that jedi needs to be packaged differently so that it isn't \"frozen\"\n", "before_files": [{"content": "#!/usr/bin/env python\n################\n# see notes at bottom for requirements\nfrom __future__ import absolute_import, print_function\nimport glob\nimport os\nimport sys\nfrom sys import platform\nfrom distutils.core import setup\nfrom pkg_resources import parse_version\n# import versioneer\nimport psychopy\nversion = psychopy.__version__\n\n# regenerate __init__.py only if we're in the source repos (not in a zip file)\ntry:\n import createInitFile # won't exist in a sdist.zip\n writeNewInit=True\nexcept:\n writeNewInit=False\nif writeNewInit:\n vStr = createInitFile.createInitFile(dist='bdist')\n\n#define the extensions to compile if necess\npackageData = []\nrequires = []\n\nif platform != 'darwin':\n raise RuntimeError(\"setupApp.py is only for building Mac Standalone bundle\")\n\nimport bdist_mpkg\nimport py2app\nresources = glob.glob('psychopy/app/Resources/*')\nresources.append('/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7/pyconfig.h')\nframeworks = [\"/usr/lib/libxml2.2.dylib\", #\"libyaml.dylib\",\n \"libevent.dylib\", \"libffi.dylib\",\n \"libmp3lame.0.dylib\",\n \"/usr/local/Cellar/glfw/3.2.1/lib/libglfw.3.2.dylib\",\n ]\nopencvLibs = glob.glob(os.path.join(sys.exec_prefix, 'lib', 'libopencv*.2.4.dylib'))\nframeworks.extend(opencvLibs)\n\nimport macholib\n#print(\"~\"*60 + \"macholib version: \"+macholib.__version__)\n\nif parse_version(macholib.__version__) <= parse_version('1.7'):\n print(\"Applying macholib patch...\")\n import macholib.dyld\n import macholib.MachOGraph\n dyld_find_1_7 = macholib.dyld.dyld_find\n def dyld_find(name, loader=None, **kwargs):\n #print(\"~\"*60 + \"calling alternate dyld_find\")\n if loader is not None:\n kwargs['loader_path'] = loader\n return dyld_find_1_7(name, **kwargs)\n macholib.MachOGraph.dyld_find = dyld_find\n\nincludes = ['Tkinter', 'tkFileDialog',\n 'imp', 'subprocess', 'shlex',\n 'shelve', # for scipy.io\n '_elementtree', 'pyexpat', # for openpyxl\n 'hid',\n 'pyo', 'greenlet', 'zmq', 'tornado',\n 'psutil', # for iohub\n 'tobii_research', # need tobii_research file and tobiiresearch pkg\n 'pysoundcard', 'soundfile', 'sounddevice', 'readline',\n 'hid',\n 'xlwt', # writes excel files for pandas\n 'vlc', # install with pip install python-vlc\n 'msgpack_numpy',\n 'configparser',\n ]\npackages = ['wx', 'psychopy',\n 'pyglet', 'pytz', 'OpenGL', 'glfw',\n 'scipy', 'matplotlib', 'lxml', 'xml', 'openpyxl',\n 'moviepy', 'imageio', 'imageio_ffmpeg',\n '_sounddevice_data', '_soundfile_data',\n 'cffi', 'pycparser',\n 'PIL', # 'Image',\n 'objc', 'Quartz', 'AppKit', 'QTKit', 'Cocoa',\n 'Foundation', 'CoreFoundation',\n 'pkg_resources', # needed for objc\n 'pyolib', 'pyo',\n 'requests', 'certifi', 'cryptography',\n 'pyosf',\n # for unit testing\n 'coverage',\n # handy external science libs\n 'serial',\n 'egi', 'pylink', 'tobiiresearch',\n 'pyxid', 'pyxid2', 'ftd2xx', # ftd2xx is used by cedrus\n 'pandas', 'tables', # 'cython',\n 'msgpack', 'yaml', 'gevent', # for ioHub\n # these aren't needed, but liked\n 'psychopy_ext', 'pyfilesec',\n 
'bidi', 'arabic_reshaper', # for right-left language conversions\n # for Py3 compatibility\n 'future', 'past', 'lib2to3',\n 'json_tricks', # allows saving arrays/dates in json\n 'git', 'gitlab',\n 'astunparse', 'esprima', # for translating/adapting py/JS\n 'pylsl', 'pygaze', 'smite',\n 'cv2',\n 'badapted', 'darc_toolbox', # adaptive methods from Ben Vincent\n 'questplus',\n 'metapensiero.pj', 'dukpy', 'macropy',\n 'jedi',\n 'psychtoolbox',\n 'freetype', 'h5py',\n ]\n\nif sys.version_info.major >= 3:\n packages.extend(['PyQt5'])\nelse:\n # not available or not working under Python3:\n includes.extend(['UserString', 'ioLabs', 'FileDialog'])\n packages.extend(['PyQt4', 'labjack', 'rusocsci'])\n # is available but py2app can't seem to find it:\n packages.extend(['OpenGL'])\n\nsetup(\n app=['psychopy/app/psychopyApp.py'],\n options=dict(py2app=dict(\n includes=includes,\n packages=packages,\n excludes=['bsddb', 'jinja2', 'IPython','ipython_genutils','nbconvert',\n 'libsz.2.dylib', 'pygame',\n # 'stringprep',\n 'functools32',\n ], # anything we need to forcibly exclude?\n resources=resources,\n argv_emulation=True,\n site_packages=True,\n frameworks=frameworks,\n iconfile='psychopy/app/Resources/psychopy.icns',\n plist=dict(\n CFBundleIconFile='psychopy.icns',\n CFBundleName = \"PsychoPy\",\n CFBundleShortVersionString = version, # must be in X.X.X format\n CFBundleVersion = version,\n CFBundleExecutable = \"PsychoPy\",\n CFBundleIdentifier = \"org.opensciencetools.psychopy\",\n CFBundleLicense = \"GNU GPLv3+\",\n NSHumanReadableCopyright = \"Open Science Tools Limited\",\n CFBundleDocumentTypes=[dict(CFBundleTypeExtensions=['*'],\n CFBundleTypeRole='Editor')],\n LSEnvironment=dict(PATH=\"/usr/local/git/bin:/usr/local/bin:\"\n \"/usr/local:/usr/bin:/usr/sbin\"),\n ),\n )) # end of the options dict\n)\n\n\n# ugly hack for opencv2:\n# As of opencv 2.4.5 the cv2.so binary used rpath to a fixed\n# location to find libs and even more annoyingly it then appended\n# 'lib' to the rpath as well. These were fine for the packaged\n# framework python but the libs in an app bundle are different.\n# So, create symlinks so they appear in the same place as in framework python\nrpath = \"dist/PsychoPy.app/Contents/Resources/\"\nfor libPath in opencvLibs:\n libname = os.path.split(libPath)[-1]\n realPath = \"../../Frameworks/\"+libname # relative path (w.r.t. the fake)\n fakePath = os.path.join(rpath, \"lib\", libname)\n os.symlink(realPath, fakePath)\n# they even did this for Python lib itself, which is in diff location\nrealPath = \"../Frameworks/Python.framework/Python\" # relative to the fake path\nfakePath = os.path.join(rpath, \"Python\")\nos.symlink(realPath, fakePath)\n\nif writeNewInit:\n # remove unwanted info about this system post-build\n createInitFile.createInitFile(dist=None)\n", "path": "setupApp.py"}]} | 2,722 | 113 |
gh_patches_debug_9562 | rasdani/github-patches | git_diff | explosion__spaCy-3004 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Initialization in text
In the `evaluate` method from
https://github.com/explosion/spacy/blob/master/examples/training/train_textcat.py,
tp, fp, fn, and tn (true positives, etc.) are initialized to 1e-8 (see below):
```
tp = 1e-8 # True positives
fp = 1e-8 # False positives
fn = 1e-8 # False negative
tn = 1e-8 # True negatives
```
Why are the values not initialized to 0? When there are no true positives, the precision comes out as 0.5, which doesn't seem right.
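A quick numerical check of that claim, plugging the 1e-8 starting values into the same precision/recall formulas the script uses:

```python
# Degenerate case: no positive predictions and no positive gold labels, so
# tp, fp and fn all keep their 1e-8 starting values.
tp = fp = fn = 1e-8

precision = tp / (tp + fp)                                   # 0.5
recall = tp / (tp + fn)                                      # 0.5
f_score = 2 * (precision * recall) / (precision + recall)    # 0.5

print(precision, recall, f_score)  # 0.5 0.5 0.5 despite zero real true positives
```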
</issue>
<code>
[start of examples/training/train_textcat.py]
1 #!/usr/bin/env python
2 # coding: utf8
3 """Train a convolutional neural network text classifier on the
4 IMDB dataset, using the TextCategorizer component. The dataset will be loaded
5 automatically via Thinc's built-in dataset loader. The model is added to
6 spacy.pipeline, and predictions are available via `doc.cats`. For more details,
7 see the documentation:
8 * Training: https://spacy.io/usage/training
9
10 Compatible with: spaCy v2.0.0+
11 """
12 from __future__ import unicode_literals, print_function
13 import plac
14 import random
15 from pathlib import Path
16 import thinc.extra.datasets
17
18 import spacy
19 from spacy.util import minibatch, compounding
20
21
22 @plac.annotations(
23 model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
24 output_dir=("Optional output directory", "option", "o", Path),
25 n_texts=("Number of texts to train from", "option", "t", int),
26 n_iter=("Number of training iterations", "option", "n", int))
27 def main(model=None, output_dir=None, n_iter=20, n_texts=2000):
28 if model is not None:
29 nlp = spacy.load(model) # load existing spaCy model
30 print("Loaded model '%s'" % model)
31 else:
32 nlp = spacy.blank('en') # create blank Language class
33 print("Created blank 'en' model")
34
35 # add the text classifier to the pipeline if it doesn't exist
36 # nlp.create_pipe works for built-ins that are registered with spaCy
37 if 'textcat' not in nlp.pipe_names:
38 textcat = nlp.create_pipe('textcat')
39 nlp.add_pipe(textcat, last=True)
40 # otherwise, get it, so we can add labels to it
41 else:
42 textcat = nlp.get_pipe('textcat')
43
44 # add label to text classifier
45 textcat.add_label('POSITIVE')
46
47 # load the IMDB dataset
48 print("Loading IMDB data...")
49 (train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=n_texts)
50 print("Using {} examples ({} training, {} evaluation)"
51 .format(n_texts, len(train_texts), len(dev_texts)))
52 train_data = list(zip(train_texts,
53 [{'cats': cats} for cats in train_cats]))
54
55 # get names of other pipes to disable them during training
56 other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat']
57 with nlp.disable_pipes(*other_pipes): # only train textcat
58 optimizer = nlp.begin_training()
59 print("Training the model...")
60 print('{:^5}\t{:^5}\t{:^5}\t{:^5}'.format('LOSS', 'P', 'R', 'F'))
61 for i in range(n_iter):
62 losses = {}
63 # batch up the examples using spaCy's minibatch
64 batches = minibatch(train_data, size=compounding(4., 32., 1.001))
65 for batch in batches:
66 texts, annotations = zip(*batch)
67 nlp.update(texts, annotations, sgd=optimizer, drop=0.2,
68 losses=losses)
69 with textcat.model.use_params(optimizer.averages):
70 # evaluate on the dev data split off in load_data()
71 scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)
72 print('{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}' # print a simple table
73 .format(losses['textcat'], scores['textcat_p'],
74 scores['textcat_r'], scores['textcat_f']))
75
76 # test the trained model
77 test_text = "This movie sucked"
78 doc = nlp(test_text)
79 print(test_text, doc.cats)
80
81 if output_dir is not None:
82 output_dir = Path(output_dir)
83 if not output_dir.exists():
84 output_dir.mkdir()
85 nlp.to_disk(output_dir)
86 print("Saved model to", output_dir)
87
88 # test the saved model
89 print("Loading from", output_dir)
90 nlp2 = spacy.load(output_dir)
91 doc2 = nlp2(test_text)
92 print(test_text, doc2.cats)
93
94
95 def load_data(limit=0, split=0.8):
96 """Load data from the IMDB dataset."""
97 # Partition off part of the train data for evaluation
98 train_data, _ = thinc.extra.datasets.imdb()
99 random.shuffle(train_data)
100 train_data = train_data[-limit:]
101 texts, labels = zip(*train_data)
102 cats = [{'POSITIVE': bool(y)} for y in labels]
103 split = int(len(train_data) * split)
104 return (texts[:split], cats[:split]), (texts[split:], cats[split:])
105
106
107 def evaluate(tokenizer, textcat, texts, cats):
108 docs = (tokenizer(text) for text in texts)
109 tp = 1e-8 # True positives
110 fp = 1e-8 # False positives
111 fn = 1e-8 # False negatives
112 tn = 1e-8 # True negatives
113 for i, doc in enumerate(textcat.pipe(docs)):
114 gold = cats[i]
115 for label, score in doc.cats.items():
116 if label not in gold:
117 continue
118 if score >= 0.5 and gold[label] >= 0.5:
119 tp += 1.
120 elif score >= 0.5 and gold[label] < 0.5:
121 fp += 1.
122 elif score < 0.5 and gold[label] < 0.5:
123 tn += 1
124 elif score < 0.5 and gold[label] >= 0.5:
125 fn += 1
126 precision = tp / (tp + fp)
127 recall = tp / (tp + fn)
128 f_score = 2 * (precision * recall) / (precision + recall)
129 return {'textcat_p': precision, 'textcat_r': recall, 'textcat_f': f_score}
130
131
132 if __name__ == '__main__':
133 plac.call(main)
134
[end of examples/training/train_textcat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/training/train_textcat.py b/examples/training/train_textcat.py
--- a/examples/training/train_textcat.py
+++ b/examples/training/train_textcat.py
@@ -106,10 +106,10 @@
def evaluate(tokenizer, textcat, texts, cats):
docs = (tokenizer(text) for text in texts)
- tp = 1e-8 # True positives
+ tp = 0.0 # True positives
fp = 1e-8 # False positives
fn = 1e-8 # False negatives
- tn = 1e-8 # True negatives
+ tn = 0.0 # True negatives
for i, doc in enumerate(textcat.pipe(docs)):
gold = cats[i]
for label, score in doc.cats.items():
| {"golden_diff": "diff --git a/examples/training/train_textcat.py b/examples/training/train_textcat.py\n--- a/examples/training/train_textcat.py\n+++ b/examples/training/train_textcat.py\n@@ -106,10 +106,10 @@\n \n def evaluate(tokenizer, textcat, texts, cats):\n docs = (tokenizer(text) for text in texts)\n- tp = 1e-8 # True positives\n+ tp = 0.0 # True positives\n fp = 1e-8 # False positives\n fn = 1e-8 # False negatives\n- tn = 1e-8 # True negatives\n+ tn = 0.0 # True negatives\n for i, doc in enumerate(textcat.pipe(docs)):\n gold = cats[i]\n for label, score in doc.cats.items():\n", "issue": "Initialization in text \nIn `evaluate `method from \r\n https://github.com/explosion/spacy/blob/master/examples/training/train_textcat.py\r\ntp, fp, fn, and tn (true positives etc) are initialized to 1e-8 (see below)\r\n\r\n```\r\ntp = 1e-8 # True positives\r\nfp = 1e-8 # False positives\r\nfn = 1e-8 # False negative\r\ntn = 1e-8 # True negatives\r\n```\r\n\r\nWhy are the values not initlaized to 0? When there are no true positives the precision is 0.5, That doesn't seem right.\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding: utf8\n\"\"\"Train a convolutional neural network text classifier on the\nIMDB dataset, using the TextCategorizer component. The dataset will be loaded\nautomatically via Thinc's built-in dataset loader. The model is added to\nspacy.pipeline, and predictions are available via `doc.cats`. For more details,\nsee the documentation:\n* Training: https://spacy.io/usage/training\n\nCompatible with: spaCy v2.0.0+\n\"\"\"\nfrom __future__ import unicode_literals, print_function\nimport plac\nimport random\nfrom pathlib import Path\nimport thinc.extra.datasets\n\nimport spacy\nfrom spacy.util import minibatch, compounding\n\n\[email protected](\n model=(\"Model name. 
Defaults to blank 'en' model.\", \"option\", \"m\", str),\n output_dir=(\"Optional output directory\", \"option\", \"o\", Path),\n n_texts=(\"Number of texts to train from\", \"option\", \"t\", int),\n n_iter=(\"Number of training iterations\", \"option\", \"n\", int))\ndef main(model=None, output_dir=None, n_iter=20, n_texts=2000):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # add the text classifier to the pipeline if it doesn't exist\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'textcat' not in nlp.pipe_names:\n textcat = nlp.create_pipe('textcat')\n nlp.add_pipe(textcat, last=True)\n # otherwise, get it, so we can add labels to it\n else:\n textcat = nlp.get_pipe('textcat')\n\n # add label to text classifier\n textcat.add_label('POSITIVE')\n\n # load the IMDB dataset\n print(\"Loading IMDB data...\")\n (train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=n_texts)\n print(\"Using {} examples ({} training, {} evaluation)\"\n .format(n_texts, len(train_texts), len(dev_texts)))\n train_data = list(zip(train_texts,\n [{'cats': cats} for cats in train_cats]))\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat']\n with nlp.disable_pipes(*other_pipes): # only train textcat\n optimizer = nlp.begin_training()\n print(\"Training the model...\")\n print('{:^5}\\t{:^5}\\t{:^5}\\t{:^5}'.format('LOSS', 'P', 'R', 'F'))\n for i in range(n_iter):\n losses = {}\n # batch up the examples using spaCy's minibatch\n batches = minibatch(train_data, size=compounding(4., 32., 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(texts, annotations, sgd=optimizer, drop=0.2,\n losses=losses)\n with textcat.model.use_params(optimizer.averages):\n # evaluate on the dev data split off in load_data()\n scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)\n print('{0:.3f}\\t{1:.3f}\\t{2:.3f}\\t{3:.3f}' # print a simple table\n .format(losses['textcat'], scores['textcat_p'],\n scores['textcat_r'], scores['textcat_f']))\n\n # test the trained model\n test_text = \"This movie sucked\"\n doc = nlp(test_text)\n print(test_text, doc.cats)\n\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n doc2 = nlp2(test_text)\n print(test_text, doc2.cats)\n\n\ndef load_data(limit=0, split=0.8):\n \"\"\"Load data from the IMDB dataset.\"\"\"\n # Partition off part of the train data for evaluation\n train_data, _ = thinc.extra.datasets.imdb()\n random.shuffle(train_data)\n train_data = train_data[-limit:]\n texts, labels = zip(*train_data)\n cats = [{'POSITIVE': bool(y)} for y in labels]\n split = int(len(train_data) * split)\n return (texts[:split], cats[:split]), (texts[split:], cats[split:])\n\n\ndef evaluate(tokenizer, textcat, texts, cats):\n docs = (tokenizer(text) for text in texts)\n tp = 1e-8 # True positives\n fp = 1e-8 # False positives\n fn = 1e-8 # False negatives\n tn = 1e-8 # True negatives\n for i, doc in enumerate(textcat.pipe(docs)):\n gold = cats[i]\n for label, score in doc.cats.items():\n if label not in gold:\n continue\n if score >= 0.5 and gold[label] >= 
0.5:\n tp += 1.\n elif score >= 0.5 and gold[label] < 0.5:\n fp += 1.\n elif score < 0.5 and gold[label] < 0.5:\n tn += 1\n elif score < 0.5 and gold[label] >= 0.5:\n fn += 1\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_score = 2 * (precision * recall) / (precision + recall)\n return {'textcat_p': precision, 'textcat_r': recall, 'textcat_f': f_score}\n\n\nif __name__ == '__main__':\n plac.call(main)\n", "path": "examples/training/train_textcat.py"}]} | 2,339 | 190 |
gh_patches_debug_17796 | rasdani/github-patches | git_diff | pypa__setuptools-522 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pkg_resources._vendor.packaging.requirements.InvalidRequirement: Invalid requirement, parse error at "'and plat'"
Originally reported by: **Anonymous**
---
../ve/bin/python setup.py --quiet develop
Traceback (most recent call last):
File "setup.py", line 92, in <module>
setup(**config)
File "/usr/lib64/python2.7/distutils/core.py", line 152, in setup
dist.run_commands()
File "/usr/lib64/python2.7/distutils/dist.py", line 953, in run_commands
self.run_command(cmd)
File "/usr/lib64/python2.7/distutils/dist.py", line 972, in run_command
cmd_obj.run()
File "build/bdist.linux-x86_64/egg/setuptools/command/develop.py", line 34, in run
File "build/bdist.linux-x86_64/egg/setuptools/command/develop.py", line 133, in install_for_development
File "build/bdist.linux-x86_64/egg/setuptools/command/easy_install.py", line 720, in process_distribution
File "build/bdist.linux-x86_64/egg/pkg_resources/__init__.py", line 838, in resolve
File "build/bdist.linux-x86_64/egg/pkg_resources/__init__.py", line 2462, in requires
File "build/bdist.linux-x86_64/egg/pkg_resources/__init__.py", line 2686, in _dep_map
File "build/bdist.linux-x86_64/egg/pkg_resources/__init__.py", line 2696, in _compute_dependencies
File "build/bdist.linux-x86_64/egg/pkg_resources/_vendor/packaging/requirements.py", line 94, in __init__
pkg_resources._vendor.packaging.requirements.InvalidRequirement: Invalid requirement, parse error at "'and plat'"
Version 20.1 works fine, 20.2 breaks.
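The traceback does not show the offending requirement string; judging from the grammar change in the patch below, a PEP-345 style dotted marker name (e.g. `platform.python_implementation`) is the likely trigger. A hypothetical reproduction, with the requirement string invented purely for illustration:

```python
# Hypothetical requirement -- the reporter's actual line is unknown; only the
# dotted marker name after "and" matters here.
from pkg_resources._vendor.packaging.requirements import Requirement

req = "futures>=3.0; python_version < '3.0' and platform.python_implementation != 'PyPy'"
Requirement(req)
# With the vendored packaging 16.5 (setuptools 20.2) this raises
# InvalidRequirement: parse error at "'and plat'", because the marker grammar
# only knows platform_python_implementation, not the dotted spelling added below.
```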
---
- Bitbucket: https://bitbucket.org/pypa/setuptools/issue/503
</issue>
<code>
[start of pkg_resources/_vendor/packaging/__about__.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4 from __future__ import absolute_import, division, print_function
5
6 __all__ = [
7 "__title__", "__summary__", "__uri__", "__version__", "__author__",
8 "__email__", "__license__", "__copyright__",
9 ]
10
11 __title__ = "packaging"
12 __summary__ = "Core utilities for Python packages"
13 __uri__ = "https://github.com/pypa/packaging"
14
15 __version__ = "16.5"
16
17 __author__ = "Donald Stufft and individual contributors"
18 __email__ = "[email protected]"
19
20 __license__ = "BSD or Apache License, Version 2.0"
21 __copyright__ = "Copyright 2014-2016 %s" % __author__
22
[end of pkg_resources/_vendor/packaging/__about__.py]
[start of pkg_resources/_vendor/packaging/markers.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4 from __future__ import absolute_import, division, print_function
5
6 import operator
7 import os
8 import platform
9 import sys
10
11 from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
12 from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
13 from pkg_resources.extern.pyparsing import Literal as L # noqa
14
15 from ._compat import string_types
16 from .specifiers import Specifier, InvalidSpecifier
17
18
19 __all__ = [
20 "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
21 "Marker", "default_environment",
22 ]
23
24
25 class InvalidMarker(ValueError):
26 """
27 An invalid marker was found, users should refer to PEP 508.
28 """
29
30
31 class UndefinedComparison(ValueError):
32 """
33 An invalid operation was attempted on a value that doesn't support it.
34 """
35
36
37 class UndefinedEnvironmentName(ValueError):
38 """
39 A name was attempted to be used that does not exist inside of the
40 environment.
41 """
42
43
44 class Node(object):
45
46 def __init__(self, value):
47 self.value = value
48
49 def __str__(self):
50 return str(self.value)
51
52 def __repr__(self):
53 return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
54
55
56 class Variable(Node):
57 pass
58
59
60 class Value(Node):
61 pass
62
63
64 VARIABLE = (
65 L("implementation_version") |
66 L("platform_python_implementation") |
67 L("implementation_name") |
68 L("python_full_version") |
69 L("platform_release") |
70 L("platform_version") |
71 L("platform_machine") |
72 L("platform_system") |
73 L("python_version") |
74 L("sys_platform") |
75 L("os_name") |
76 L("extra")
77 )
78 VARIABLE.setParseAction(lambda s, l, t: Variable(t[0]))
79
80 VERSION_CMP = (
81 L("===") |
82 L("==") |
83 L(">=") |
84 L("<=") |
85 L("!=") |
86 L("~=") |
87 L(">") |
88 L("<")
89 )
90
91 MARKER_OP = VERSION_CMP | L("not in") | L("in")
92
93 MARKER_VALUE = QuotedString("'") | QuotedString('"')
94 MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
95
96 BOOLOP = L("and") | L("or")
97
98 MARKER_VAR = VARIABLE | MARKER_VALUE
99
100 MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
101 MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
102
103 LPAREN = L("(").suppress()
104 RPAREN = L(")").suppress()
105
106 MARKER_EXPR = Forward()
107 MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
108 MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
109
110 MARKER = stringStart + MARKER_EXPR + stringEnd
111
112
113 def _coerce_parse_result(results):
114 if isinstance(results, ParseResults):
115 return [_coerce_parse_result(i) for i in results]
116 else:
117 return results
118
119
120 def _format_marker(marker, first=True):
121 assert isinstance(marker, (list, tuple, string_types))
122
123 # Sometimes we have a structure like [[...]] which is a single item list
124 # where the single item is itself it's own list. In that case we want skip
125 # the rest of this function so that we don't get extraneous () on the
126 # outside.
127 if (isinstance(marker, list) and len(marker) == 1 and
128 isinstance(marker[0], (list, tuple))):
129 return _format_marker(marker[0])
130
131 if isinstance(marker, list):
132 inner = (_format_marker(m, first=False) for m in marker)
133 if first:
134 return " ".join(inner)
135 else:
136 return "(" + " ".join(inner) + ")"
137 elif isinstance(marker, tuple):
138 return '{0} {1} "{2}"'.format(*marker)
139 else:
140 return marker
141
142
143 _operators = {
144 "in": lambda lhs, rhs: lhs in rhs,
145 "not in": lambda lhs, rhs: lhs not in rhs,
146 "<": operator.lt,
147 "<=": operator.le,
148 "==": operator.eq,
149 "!=": operator.ne,
150 ">=": operator.ge,
151 ">": operator.gt,
152 }
153
154
155 def _eval_op(lhs, op, rhs):
156 try:
157 spec = Specifier("".join([op, rhs]))
158 except InvalidSpecifier:
159 pass
160 else:
161 return spec.contains(lhs)
162
163 oper = _operators.get(op)
164 if oper is None:
165 raise UndefinedComparison(
166 "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
167 )
168
169 return oper(lhs, rhs)
170
171
172 _undefined = object()
173
174
175 def _get_env(environment, name):
176 value = environment.get(name, _undefined)
177
178 if value is _undefined:
179 raise UndefinedEnvironmentName(
180 "{0!r} does not exist in evaluation environment.".format(name)
181 )
182
183 return value
184
185
186 def _evaluate_markers(markers, environment):
187 groups = [[]]
188
189 for marker in markers:
190 assert isinstance(marker, (list, tuple, string_types))
191
192 if isinstance(marker, list):
193 groups[-1].append(_evaluate_markers(marker, environment))
194 elif isinstance(marker, tuple):
195 lhs, op, rhs = marker
196
197 if isinstance(lhs, Variable):
198 lhs_value = _get_env(environment, lhs.value)
199 rhs_value = rhs.value
200 else:
201 lhs_value = lhs.value
202 rhs_value = _get_env(environment, rhs.value)
203
204 groups[-1].append(_eval_op(lhs_value, op, rhs_value))
205 else:
206 assert marker in ["and", "or"]
207 if marker == "or":
208 groups.append([])
209
210 return any(all(item) for item in groups)
211
212
213 def format_full_version(info):
214 version = '{0.major}.{0.minor}.{0.micro}'.format(info)
215 kind = info.releaselevel
216 if kind != 'final':
217 version += kind[0] + str(info.serial)
218 return version
219
220
221 def default_environment():
222 if hasattr(sys, 'implementation'):
223 iver = format_full_version(sys.implementation.version)
224 implementation_name = sys.implementation.name
225 else:
226 iver = '0'
227 implementation_name = ''
228
229 return {
230 "implementation_name": implementation_name,
231 "implementation_version": iver,
232 "os_name": os.name,
233 "platform_machine": platform.machine(),
234 "platform_release": platform.release(),
235 "platform_system": platform.system(),
236 "platform_version": platform.version(),
237 "python_full_version": platform.python_version(),
238 "platform_python_implementation": platform.python_implementation(),
239 "python_version": platform.python_version()[:3],
240 "sys_platform": sys.platform,
241 }
242
243
244 class Marker(object):
245
246 def __init__(self, marker):
247 try:
248 self._markers = _coerce_parse_result(MARKER.parseString(marker))
249 except ParseException as e:
250 err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
251 marker, marker[e.loc:e.loc + 8])
252 raise InvalidMarker(err_str)
253
254 def __str__(self):
255 return _format_marker(self._markers)
256
257 def __repr__(self):
258 return "<Marker({0!r})>".format(str(self))
259
260 def evaluate(self, environment=None):
261 """Evaluate a marker.
262
263 Return the boolean from evaluating the given marker against the
264 environment. environment is an optional argument to override all or
265 part of the determined environment.
266
267 The environment is determined from the current Python process.
268 """
269 current_environment = default_environment()
270 if environment is not None:
271 current_environment.update(environment)
272
273 return _evaluate_markers(self._markers, current_environment)
274
[end of pkg_resources/_vendor/packaging/markers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pkg_resources/_vendor/packaging/__about__.py b/pkg_resources/_vendor/packaging/__about__.py
--- a/pkg_resources/_vendor/packaging/__about__.py
+++ b/pkg_resources/_vendor/packaging/__about__.py
@@ -12,7 +12,7 @@
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
-__version__ = "16.5"
+__version__ = "16.6"
__author__ = "Donald Stufft and individual contributors"
__email__ = "[email protected]"
diff --git a/pkg_resources/_vendor/packaging/markers.py b/pkg_resources/_vendor/packaging/markers.py
--- a/pkg_resources/_vendor/packaging/markers.py
+++ b/pkg_resources/_vendor/packaging/markers.py
@@ -73,9 +73,14 @@
L("python_version") |
L("sys_platform") |
L("os_name") |
+ L("os.name") | # PEP-345
+ L("sys.platform") | # PEP-345
+ L("platform.version") | # PEP-345
+ L("platform.machine") | # PEP-345
+ L("platform.python_implementation") | # PEP-345
L("extra")
)
-VARIABLE.setParseAction(lambda s, l, t: Variable(t[0]))
+VARIABLE.setParseAction(lambda s, l, t: Variable(t[0].replace('.', '_')))
VERSION_CMP = (
L("===") |
| {"golden_diff": "diff --git a/pkg_resources/_vendor/packaging/__about__.py b/pkg_resources/_vendor/packaging/__about__.py\n--- a/pkg_resources/_vendor/packaging/__about__.py\n+++ b/pkg_resources/_vendor/packaging/__about__.py\n@@ -12,7 +12,7 @@\n __summary__ = \"Core utilities for Python packages\"\n __uri__ = \"https://github.com/pypa/packaging\"\n \n-__version__ = \"16.5\"\n+__version__ = \"16.6\"\n \n __author__ = \"Donald Stufft and individual contributors\"\n __email__ = \"[email protected]\"\ndiff --git a/pkg_resources/_vendor/packaging/markers.py b/pkg_resources/_vendor/packaging/markers.py\n--- a/pkg_resources/_vendor/packaging/markers.py\n+++ b/pkg_resources/_vendor/packaging/markers.py\n@@ -73,9 +73,14 @@\n L(\"python_version\") |\n L(\"sys_platform\") |\n L(\"os_name\") |\n+ L(\"os.name\") | # PEP-345\n+ L(\"sys.platform\") | # PEP-345\n+ L(\"platform.version\") | # PEP-345\n+ L(\"platform.machine\") | # PEP-345\n+ L(\"platform.python_implementation\") | # PEP-345\n L(\"extra\")\n )\n-VARIABLE.setParseAction(lambda s, l, t: Variable(t[0]))\n+VARIABLE.setParseAction(lambda s, l, t: Variable(t[0].replace('.', '_')))\n \n VERSION_CMP = (\n L(\"===\") |\n", "issue": "pkg_resources._vendor.packaging.requirements.InvalidRequirement: Invalid requirement, parse error at \"'and plat'\"\nOriginally reported by: **Anonymous**\n\n---\n\n../ve/bin/python setup.py --quiet develop\nTraceback (most recent call last):\n File \"setup.py\", line 92, in <module>\n setup(**config)\n File \"/usr/lib64/python2.7/distutils/core.py\", line 152, in setup\n dist.run_commands()\n File \"/usr/lib64/python2.7/distutils/dist.py\", line 953, in run_commands\n self.run_command(cmd)\n File \"/usr/lib64/python2.7/distutils/dist.py\", line 972, in run_command\n cmd_obj.run()\n File \"build/bdist.linux-x86_64/egg/setuptools/command/develop.py\", line 34, in run\n File \"build/bdist.linux-x86_64/egg/setuptools/command/develop.py\", line 133, in install_for_development\n File \"build/bdist.linux-x86_64/egg/setuptools/command/easy_install.py\", line 720, in process_distribution\n\n File \"build/bdist.linux-x86_64/egg/pkg_resources/**init**.py\", line 838, in resolve\n File \"build/bdist.linux-x86_64/egg/pkg_resources/**init**.py\", line 2462, in requires\n File \"build/bdist.linux-x86_64/egg/pkg_resources/**init**.py\", line 2686, in _dep_map\n File \"build/bdist.linux-x86_64/egg/pkg_resources/__init__.py\", line 2696, in _compute_dependencies\n File \"build/bdist.linux-x86_64/egg/pkg_resources/_vendor/packaging/requirements.py\", line 94, in __init__\npkg_resources._vendor.packaging.requirements.InvalidRequirement: Invalid requirement, parse error at \"'and plat'\"\n\nVersion 20.1 works fine, 20.2 breaks.\n\n---\n- Bitbucket: https://bitbucket.org/pypa/setuptools/issue/503\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"packaging\"\n__summary__ = \"Core utilities for Python packages\"\n__uri__ = \"https://github.com/pypa/packaging\"\n\n__version__ = \"16.5\"\n\n__author__ = \"Donald Stufft and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2014-2016 %s\" % __author__\n", "path": "pkg_resources/_vendor/packaging/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\nfrom __future__ import absolute_import, division, print_function\n\nimport operator\nimport os\nimport platform\nimport sys\n\nfrom pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd\nfrom pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString\nfrom pkg_resources.extern.pyparsing import Literal as L # noqa\n\nfrom ._compat import string_types\nfrom .specifiers import Specifier, InvalidSpecifier\n\n\n__all__ = [\n \"InvalidMarker\", \"UndefinedComparison\", \"UndefinedEnvironmentName\",\n \"Marker\", \"default_environment\",\n]\n\n\nclass InvalidMarker(ValueError):\n \"\"\"\n An invalid marker was found, users should refer to PEP 508.\n \"\"\"\n\n\nclass UndefinedComparison(ValueError):\n \"\"\"\n An invalid operation was attempted on a value that doesn't support it.\n \"\"\"\n\n\nclass UndefinedEnvironmentName(ValueError):\n \"\"\"\n A name was attempted to be used that does not exist inside of the\n environment.\n \"\"\"\n\n\nclass Node(object):\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return str(self.value)\n\n def __repr__(self):\n return \"<{0}({1!r})>\".format(self.__class__.__name__, str(self))\n\n\nclass Variable(Node):\n pass\n\n\nclass Value(Node):\n pass\n\n\nVARIABLE = (\n L(\"implementation_version\") |\n L(\"platform_python_implementation\") |\n L(\"implementation_name\") |\n L(\"python_full_version\") |\n L(\"platform_release\") |\n L(\"platform_version\") |\n L(\"platform_machine\") |\n L(\"platform_system\") |\n L(\"python_version\") |\n L(\"sys_platform\") |\n L(\"os_name\") |\n L(\"extra\")\n)\nVARIABLE.setParseAction(lambda s, l, t: Variable(t[0]))\n\nVERSION_CMP = (\n L(\"===\") |\n L(\"==\") |\n L(\">=\") |\n L(\"<=\") |\n L(\"!=\") |\n L(\"~=\") |\n L(\">\") |\n L(\"<\")\n)\n\nMARKER_OP = VERSION_CMP | L(\"not in\") | L(\"in\")\n\nMARKER_VALUE = QuotedString(\"'\") | QuotedString('\"')\nMARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))\n\nBOOLOP = L(\"and\") | L(\"or\")\n\nMARKER_VAR = VARIABLE | MARKER_VALUE\n\nMARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)\nMARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))\n\nLPAREN = L(\"(\").suppress()\nRPAREN = L(\")\").suppress()\n\nMARKER_EXPR = Forward()\nMARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)\nMARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)\n\nMARKER = stringStart + MARKER_EXPR + stringEnd\n\n\ndef _coerce_parse_result(results):\n if isinstance(results, ParseResults):\n return [_coerce_parse_result(i) for i in results]\n else:\n return results\n\n\ndef 
_format_marker(marker, first=True):\n assert isinstance(marker, (list, tuple, string_types))\n\n # Sometimes we have a structure like [[...]] which is a single item list\n # where the single item is itself it's own list. In that case we want skip\n # the rest of this function so that we don't get extraneous () on the\n # outside.\n if (isinstance(marker, list) and len(marker) == 1 and\n isinstance(marker[0], (list, tuple))):\n return _format_marker(marker[0])\n\n if isinstance(marker, list):\n inner = (_format_marker(m, first=False) for m in marker)\n if first:\n return \" \".join(inner)\n else:\n return \"(\" + \" \".join(inner) + \")\"\n elif isinstance(marker, tuple):\n return '{0} {1} \"{2}\"'.format(*marker)\n else:\n return marker\n\n\n_operators = {\n \"in\": lambda lhs, rhs: lhs in rhs,\n \"not in\": lambda lhs, rhs: lhs not in rhs,\n \"<\": operator.lt,\n \"<=\": operator.le,\n \"==\": operator.eq,\n \"!=\": operator.ne,\n \">=\": operator.ge,\n \">\": operator.gt,\n}\n\n\ndef _eval_op(lhs, op, rhs):\n try:\n spec = Specifier(\"\".join([op, rhs]))\n except InvalidSpecifier:\n pass\n else:\n return spec.contains(lhs)\n\n oper = _operators.get(op)\n if oper is None:\n raise UndefinedComparison(\n \"Undefined {0!r} on {1!r} and {2!r}.\".format(op, lhs, rhs)\n )\n\n return oper(lhs, rhs)\n\n\n_undefined = object()\n\n\ndef _get_env(environment, name):\n value = environment.get(name, _undefined)\n\n if value is _undefined:\n raise UndefinedEnvironmentName(\n \"{0!r} does not exist in evaluation environment.\".format(name)\n )\n\n return value\n\n\ndef _evaluate_markers(markers, environment):\n groups = [[]]\n\n for marker in markers:\n assert isinstance(marker, (list, tuple, string_types))\n\n if isinstance(marker, list):\n groups[-1].append(_evaluate_markers(marker, environment))\n elif isinstance(marker, tuple):\n lhs, op, rhs = marker\n\n if isinstance(lhs, Variable):\n lhs_value = _get_env(environment, lhs.value)\n rhs_value = rhs.value\n else:\n lhs_value = lhs.value\n rhs_value = _get_env(environment, rhs.value)\n\n groups[-1].append(_eval_op(lhs_value, op, rhs_value))\n else:\n assert marker in [\"and\", \"or\"]\n if marker == \"or\":\n groups.append([])\n\n return any(all(item) for item in groups)\n\n\ndef format_full_version(info):\n version = '{0.major}.{0.minor}.{0.micro}'.format(info)\n kind = info.releaselevel\n if kind != 'final':\n version += kind[0] + str(info.serial)\n return version\n\n\ndef default_environment():\n if hasattr(sys, 'implementation'):\n iver = format_full_version(sys.implementation.version)\n implementation_name = sys.implementation.name\n else:\n iver = '0'\n implementation_name = ''\n\n return {\n \"implementation_name\": implementation_name,\n \"implementation_version\": iver,\n \"os_name\": os.name,\n \"platform_machine\": platform.machine(),\n \"platform_release\": platform.release(),\n \"platform_system\": platform.system(),\n \"platform_version\": platform.version(),\n \"python_full_version\": platform.python_version(),\n \"platform_python_implementation\": platform.python_implementation(),\n \"python_version\": platform.python_version()[:3],\n \"sys_platform\": sys.platform,\n }\n\n\nclass Marker(object):\n\n def __init__(self, marker):\n try:\n self._markers = _coerce_parse_result(MARKER.parseString(marker))\n except ParseException as e:\n err_str = \"Invalid marker: {0!r}, parse error at {1!r}\".format(\n marker, marker[e.loc:e.loc + 8])\n raise InvalidMarker(err_str)\n\n def __str__(self):\n return _format_marker(self._markers)\n\n def 
__repr__(self):\n return \"<Marker({0!r})>\".format(str(self))\n\n def evaluate(self, environment=None):\n \"\"\"Evaluate a marker.\n\n Return the boolean from evaluating the given marker against the\n environment. environment is an optional argument to override all or\n part of the determined environment.\n\n The environment is determined from the current Python process.\n \"\"\"\n current_environment = default_environment()\n if environment is not None:\n current_environment.update(environment)\n\n return _evaluate_markers(self._markers, current_environment)\n", "path": "pkg_resources/_vendor/packaging/markers.py"}]} | 3,806 | 372 |
gh_patches_debug_39137 | rasdani/github-patches | git_diff | comic__grand-challenge.org-509 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove countdown in build_images
This task should work synchronously, and not rely on a countdown timer. Maybe create all the `RawImageFile`s first, then add these at the create time of `RawImageUploadSession`. Or, don't rely on a post_save signal, but manually invoke this when all of the database objects are ready.
This will then simplify the test code.
</issue>
<code>
[start of app/grandchallenge/cases/signals.py]
1 from django.db.models.signals import post_save
2 from django.dispatch import receiver
3
4 from grandchallenge.cases.models import (
5 RawImageUploadSession, UPLOAD_SESSION_STATE
6 )
7 from grandchallenge.cases.tasks import build_images
8 from grandchallenge.core.utils import disable_for_loaddata
9
10
11 @receiver(post_save, sender=RawImageUploadSession)
12 @disable_for_loaddata
13 def queue_build_image_job(
14 instance: RawImageUploadSession=None, created: bool=False,
15 *_, **__):
16 if created:
17 try:
18 task = build_images.apply_async(
19 args=(instance.pk,),
20 countdown=5, # Wait a bit - immediate start won't work!
21 )
22 instance.session_state = UPLOAD_SESSION_STATE.queued
23 instance.processing_task = task.id
24 instance.save()
25 except Exception as e:
26 instance.session_state = UPLOAD_SESSION_STATE.stopped
27 instance.error_message = f"Could not start job: {e}"
28 instance.save()
29 raise e
30
[end of app/grandchallenge/cases/signals.py]
[start of app/grandchallenge/cases/forms.py]
1 # -*- coding: utf-8 -*-
2 from typing import List
3
4 from crispy_forms.helper import FormHelper
5 from crispy_forms.layout import Submit
6 from django import forms
7 from django.db import transaction
8
9 from grandchallenge.cases.models import RawImageUploadSession, RawImageFile
10 from grandchallenge.jqfileupload.filters import reject_duplicate_filenames
11 from grandchallenge.jqfileupload.widgets import uploader
12 from grandchallenge.jqfileupload.widgets.uploader import (
13 UploadedAjaxFileList, StagedAjaxFile
14 )
15
16 upload_raw_files_widget = uploader.AjaxUploadWidget(
17 ajax_target_path="ajax/raw_files/",
18 multifile=True,
19 auto_commit=False,
20 upload_validators=[
21 reject_duplicate_filenames,
22 ],
23 )
24
25
26 class UploadRawImagesForm(forms.ModelForm):
27 files = UploadedAjaxFileList(
28 widget=upload_raw_files_widget,
29 label="Image files",
30 help_text=(
31 'Upload images for creating a new archive'
32 ),
33 )
34
35 def __init__(self, *args, **kwargs):
36 super(UploadRawImagesForm, self).__init__(*args, **kwargs)
37 self.helper = FormHelper()
38 self.helper.add_input(Submit("save", "Submit"))
39
40 def save(self, commit=True):
41 with transaction.atomic():
42 instance = super(UploadRawImagesForm, self).save(commit=commit)
43
44 # Create links between the created session and all uploaded files
45 uploaded_files = self.cleaned_data[
46 "files"] # type: List[StagedAjaxFile]
47
48 for uploaded_file in uploaded_files:
49 RawImageFile.objects.create(
50 upload_session=instance,
51 filename=uploaded_file.name,
52 staged_file_id=uploaded_file.uuid,
53 )
54
55 return instance
56
57 class Meta:
58 model = RawImageUploadSession
59 fields = ['files']
60
[end of app/grandchallenge/cases/forms.py]
[start of app/grandchallenge/cases/tasks.py]
1 import shutil
2 from pathlib import Path
3 from tempfile import mkdtemp
4 from typing import Tuple, Sequence
5 from uuid import UUID
6
7 from celery import shared_task
8 from django.db import transaction
9
10 from grandchallenge.cases.image_builders import ImageBuilderResult
11 from grandchallenge.cases.image_builders.metaio_mhd_mha import (
12 image_builder_mhd
13 )
14 from grandchallenge.cases.log import logger
15 from grandchallenge.cases.models import (
16 RawImageUploadSession, UPLOAD_SESSION_STATE, Image, ImageFile, RawImageFile
17 )
18 from grandchallenge.jqfileupload.widgets.uploader import (
19 StagedAjaxFile, NotFoundError
20 )
21
22
23 class ProvisioningError(Exception): pass
24
25
26 def populate_provisioning_directory(
27 raw_files: Sequence[RawImageFile],
28 provisioning_dir: Path):
29 """
30 Provisions provisioning_dir with the files associated using the given
31 list of RawImageFile objects.
32
33 Parameters
34 ----------
35 raw_files:
36 The list of RawImageFile that should be saved in the target
37 directory.
38
39 provisioning_dir: Path
40 The path where to copy the files.
41
42 Raises
43 ------
44 ProvisioningError:
45 Raised when not all files could be copied to the provisioning directory.
46 """
47 provisioning_dir = Path(provisioning_dir)
48
49 def copy_to_tmpdir(image_file: RawImageFile):
50 staged_file = StagedAjaxFile(image_file.staged_file_id)
51 if not staged_file.exists:
52 raise ValueError(
53 f"staged file {image_file.staged_file_id} does not exist")
54
55 with open(provisioning_dir / staged_file.name, "wb") as dest_file:
56 with staged_file.open() as src_file:
57 BUFFER_SIZE = 0x10000
58 first = True
59 while first or (len(buffer) >= BUFFER_SIZE):
60 first = False
61 buffer = src_file.read(BUFFER_SIZE)
62 dest_file.write(buffer)
63
64 exceptions_raised = 0
65 for raw_file in raw_files:
66 try:
67 copy_to_tmpdir(raw_file)
68 except Exception as e:
69 logger.exception(
70 f"populate_provisioning_directory exception "
71 f"for file: '{raw_file.filename}'")
72 exceptions_raised += 1
73
74 if exceptions_raised > 0:
75 raise ProvisioningError(
76 f"{exceptions_raised} errors occurred during provisioning of the "
77 f"image construction directory")
78
79
80 @transaction.atomic
81 def store_image(image: Image, all_image_files: Sequence[ImageFile]):
82 """
83 Stores an image in the database in a single transaction (or fails
84 accordingly). Associated image files are extracted from the
85 all_image_files argument and stored together with the image itself
86 in a single transaction.
87
88 Parameters
89 ----------
90 image: :class:`Image`
91 The image to store. The actual image files that are stored are extracted
92 from the second argument.
93
94 all_image_files: list of :class:`ImageFile`
95 An unordered list of ImageFile objects that might or might not belong
96 to the image provided as the first argument. The function automatically
97 extracts related images from the all_image_files argument to store
98 alongside the given image.
99 """
100 associated_files = [
101 _if for _if in all_image_files
102 if _if.image == image
103 ]
104 image.save()
105 for af in associated_files:
106 af.save()
107
108
109 IMAGE_BUILDER_ALGORITHMS = [
110 image_builder_mhd
111 ]
112
113
114 def remove_duplicate_files(session_files: Sequence[RawImageFile]) -> Tuple[
115 Sequence[RawImageFile], Sequence[RawImageFile]]:
116 """
117 Filters the given sequence of RawImageFile objects and removes all files
118 that have a nun-unqie filename.
119
120 Parameters
121 ----------
122 session_files: Sequence[RawImageFile]
123 List of RawImageFile objects thats filenames should be checked for
124 uniqueness.
125
126 Returns
127 -------
128 Two Sequence[RawImageFile]. The first sequence is the filtered session_files
129 list, the second list is a list of duplicates that were removed.
130 """
131 filename_lookup = {}
132 duplicates = []
133 for file in session_files:
134 if file.filename in filename_lookup:
135 duplicates.append(file)
136
137 looked_up_file = filename_lookup[file.filename]
138 if looked_up_file is not None:
139 duplicates.append(looked_up_file)
140 filename_lookup[file.filename] = None
141 else:
142 filename_lookup[file.filename] = file
143 return (
144 tuple(x for x in filename_lookup.values() if x is not None),
145 tuple(duplicates)
146 )
147
148
149 @shared_task(
150 autoretry_for=(RawImageUploadSession.DoesNotExist,),
151 default_retry_delay=60,
152 retry_kwargs={
153 'max_retries': 15
154 })
155 def build_images(upload_session_uuid: UUID):
156 """
157 Task which analyzes an upload session and attempts to extract and store
158 detected images assembled from files uploaded in the image session.
159
160 The task updates the state-filed of the associated
161 :class:`RawImageUploadSession` to indicate if it is running or has finished
162 computing.
163
164 Results are stored in:
165 - `RawImageUploadSession.error_message` if a general error occurred during
166 processing.
167 - The `RawImageFile.error` field of associated `RawImageFile` objects,
168 in case files could not be processed.
169
170 The operation of building images will delete associated `StagedAjaxFile`s
171 of analyzed images in order to free up space on the server (only done if the
172 function does not error out).
173
174 If a job fails due to a RawImageUploadSession.DoesNotExist error, the
175 job is queued for a retry (max 15 times).
176
177 Parameters
178 ----------
179 upload_session_uuid: UUID
180 The uuid of the upload sessions that should be analyzed.
181 """
182 upload_session = RawImageUploadSession.objects.get(pk=upload_session_uuid)
183 upload_session: RawImageUploadSession
184
185 if upload_session.session_state == UPLOAD_SESSION_STATE.queued:
186 tmp_dir = Path(mkdtemp(prefix="construct_image_volumes-"))
187 try:
188 try:
189 upload_session.session_state = UPLOAD_SESSION_STATE.running
190 upload_session.save()
191
192 session_files = RawImageFile.objects.filter(
193 upload_session=upload_session.pk).all()
194 session_files: Tuple[RawImageFile]
195
196 session_files, duplicates = \
197 remove_duplicate_files(session_files)
198 for duplicate in duplicates:
199 duplicate: RawImageFile
200 duplicate.error = "Filename not unique"
201 saf = StagedAjaxFile(duplicate.staged_file_id)
202 duplicate.staged_file_id = None
203 saf.delete()
204 duplicate.save()
205
206 populate_provisioning_directory(session_files, tmp_dir)
207
208 filename_lookup = {
209 StagedAjaxFile(
210 raw_image_file.staged_file_id).name: raw_image_file
211 for raw_image_file in session_files
212 }
213 unconsumed_filenames = set(filename_lookup.keys())
214
215 collected_images = []
216 collected_associated_files = []
217 for algorithm in IMAGE_BUILDER_ALGORITHMS:
218 algorithm_result = algorithm(tmp_dir)
219 algorithm_result: ImageBuilderResult
220
221 collected_images += list(algorithm_result.new_images)
222 collected_associated_files += \
223 list(algorithm_result.new_image_files)
224
225 for filename in algorithm_result.consumed_files:
226 if filename in unconsumed_filenames:
227 unconsumed_filenames.remove(filename)
228 for filename, msg in algorithm_result.file_errors_map.items():
229 if filename in unconsumed_filenames:
230 unconsumed_filenames.remove(filename)
231 raw_image = filename_lookup[filename]
232 raw_file: RawImageFile
233 raw_image.error = str(msg)[:256]
234 raw_image.save()
235
236 for image in collected_images:
237 image.origin = upload_session
238 store_image(image, collected_associated_files)
239 for unconsumed_filename in unconsumed_filenames:
240 raw_file = filename_lookup[unconsumed_filename]
241 raw_file.error = \
242 "File could not be processed by any image builder"
243
244 # Delete any touched file data
245 for file in session_files:
246 try:
247 saf = StagedAjaxFile(file.staged_file_id)
248 file.staged_file_id = None
249 saf.delete()
250 file.save()
251 except NotFoundError:
252 pass
253 except Exception as e:
254 upload_session.error_message = str(e)
255 finally:
256 if tmp_dir is not None:
257 shutil.rmtree(tmp_dir)
258
259 upload_session.session_state = UPLOAD_SESSION_STATE.stopped
260 upload_session.save()
261
[end of app/grandchallenge/cases/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/cases/forms.py b/app/grandchallenge/cases/forms.py
--- a/app/grandchallenge/cases/forms.py
+++ b/app/grandchallenge/cases/forms.py
@@ -33,24 +33,29 @@
)
def __init__(self, *args, **kwargs):
- super(UploadRawImagesForm, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit("save", "Submit"))
def save(self, commit=True):
- with transaction.atomic():
- instance = super(UploadRawImagesForm, self).save(commit=commit)
-
- # Create links between the created session and all uploaded files
- uploaded_files = self.cleaned_data[
- "files"] # type: List[StagedAjaxFile]
-
- for uploaded_file in uploaded_files:
- RawImageFile.objects.create(
- upload_session=instance,
- filename=uploaded_file.name,
- staged_file_id=uploaded_file.uuid,
- )
+ instance = super().save(commit=False)
+
+ # Create links between the created session and all uploaded files
+ uploaded_files = self.cleaned_data[
+ "files"] # type: List[StagedAjaxFile]
+
+ raw_files = [
+ RawImageFile(
+ upload_session=instance,
+ filename=uploaded_file.name,
+ staged_file_id=uploaded_file.uuid,
+ ) for uploaded_file in uploaded_files
+ ]
+
+ if commit:
+ with transaction.atomic():
+ instance.save()
+ RawImageFile.objects.bulk_create(raw_files)
return instance
diff --git a/app/grandchallenge/cases/signals.py b/app/grandchallenge/cases/signals.py
--- a/app/grandchallenge/cases/signals.py
+++ b/app/grandchallenge/cases/signals.py
@@ -11,17 +11,20 @@
@receiver(post_save, sender=RawImageUploadSession)
@disable_for_loaddata
def queue_build_image_job(
- instance: RawImageUploadSession=None, created: bool=False,
- *_, **__):
+ instance: RawImageUploadSession = None, created: bool = False, *_, **__
+):
if created:
try:
- task = build_images.apply_async(
- args=(instance.pk,),
- countdown=5, # Wait a bit - immediate start won't work!
+
+ RawImageUploadSession.objects.filter(pk=instance.pk).update(
+ session_state=UPLOAD_SESSION_STATE.queued,
+ processing_task=instance.pk
)
- instance.session_state = UPLOAD_SESSION_STATE.queued
- instance.processing_task = task.id
- instance.save()
+
+ build_images.apply_async(
+ task_id=str(instance.pk), args=(instance.pk,),
+ )
+
except Exception as e:
instance.session_state = UPLOAD_SESSION_STATE.stopped
instance.error_message = f"Could not start job: {e}"
diff --git a/app/grandchallenge/cases/tasks.py b/app/grandchallenge/cases/tasks.py
--- a/app/grandchallenge/cases/tasks.py
+++ b/app/grandchallenge/cases/tasks.py
@@ -146,12 +146,7 @@
)
-@shared_task(
- autoretry_for=(RawImageUploadSession.DoesNotExist,),
- default_retry_delay=60,
- retry_kwargs={
- 'max_retries': 15
- })
+@shared_task
def build_images(upload_session_uuid: UUID):
"""
Task which analyzes an upload session and attempts to extract and store
| {"golden_diff": "diff --git a/app/grandchallenge/cases/forms.py b/app/grandchallenge/cases/forms.py\n--- a/app/grandchallenge/cases/forms.py\n+++ b/app/grandchallenge/cases/forms.py\n@@ -33,24 +33,29 @@\n )\n \n def __init__(self, *args, **kwargs):\n- super(UploadRawImagesForm, self).__init__(*args, **kwargs)\n+ super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.add_input(Submit(\"save\", \"Submit\"))\n \n def save(self, commit=True):\n- with transaction.atomic():\n- instance = super(UploadRawImagesForm, self).save(commit=commit)\n-\n- # Create links between the created session and all uploaded files\n- uploaded_files = self.cleaned_data[\n- \"files\"] # type: List[StagedAjaxFile]\n-\n- for uploaded_file in uploaded_files:\n- RawImageFile.objects.create(\n- upload_session=instance,\n- filename=uploaded_file.name,\n- staged_file_id=uploaded_file.uuid,\n- )\n+ instance = super().save(commit=False)\n+\n+ # Create links between the created session and all uploaded files\n+ uploaded_files = self.cleaned_data[\n+ \"files\"] # type: List[StagedAjaxFile]\n+\n+ raw_files = [\n+ RawImageFile(\n+ upload_session=instance,\n+ filename=uploaded_file.name,\n+ staged_file_id=uploaded_file.uuid,\n+ ) for uploaded_file in uploaded_files\n+ ]\n+\n+ if commit:\n+ with transaction.atomic():\n+ instance.save()\n+ RawImageFile.objects.bulk_create(raw_files)\n \n return instance\n \ndiff --git a/app/grandchallenge/cases/signals.py b/app/grandchallenge/cases/signals.py\n--- a/app/grandchallenge/cases/signals.py\n+++ b/app/grandchallenge/cases/signals.py\n@@ -11,17 +11,20 @@\n @receiver(post_save, sender=RawImageUploadSession)\n @disable_for_loaddata\n def queue_build_image_job(\n- instance: RawImageUploadSession=None, created: bool=False,\n- *_, **__):\n+ instance: RawImageUploadSession = None, created: bool = False, *_, **__\n+):\n if created:\n try:\n- task = build_images.apply_async(\n- args=(instance.pk,),\n- countdown=5, # Wait a bit - immediate start won't work!\n+\n+ RawImageUploadSession.objects.filter(pk=instance.pk).update(\n+ session_state=UPLOAD_SESSION_STATE.queued,\n+ processing_task=instance.pk\n )\n- instance.session_state = UPLOAD_SESSION_STATE.queued\n- instance.processing_task = task.id\n- instance.save()\n+\n+ build_images.apply_async(\n+ task_id=str(instance.pk), args=(instance.pk,),\n+ )\n+\n except Exception as e:\n instance.session_state = UPLOAD_SESSION_STATE.stopped\n instance.error_message = f\"Could not start job: {e}\"\ndiff --git a/app/grandchallenge/cases/tasks.py b/app/grandchallenge/cases/tasks.py\n--- a/app/grandchallenge/cases/tasks.py\n+++ b/app/grandchallenge/cases/tasks.py\n@@ -146,12 +146,7 @@\n )\n \n \n-@shared_task(\n- autoretry_for=(RawImageUploadSession.DoesNotExist,),\n- default_retry_delay=60,\n- retry_kwargs={\n- 'max_retries': 15\n- })\n+@shared_task\n def build_images(upload_session_uuid: UUID):\n \"\"\"\n Task which analyzes an upload session and attempts to extract and store\n", "issue": "Remove countdown in build_images \nThis task should work synchronously, and not rely on a countdown timer. Maybe create all the `RawImageFile`s first, then add these at the create time of `RawImageUploadSession`. 
Or, don't rely on a post_save signal, but manually invoke this when all of the database objects are ready.\r\n\r\nThis will then simplify the test code.\n", "before_files": [{"content": "from django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom grandchallenge.cases.models import (\n RawImageUploadSession, UPLOAD_SESSION_STATE\n)\nfrom grandchallenge.cases.tasks import build_images\nfrom grandchallenge.core.utils import disable_for_loaddata\n\n\n@receiver(post_save, sender=RawImageUploadSession)\n@disable_for_loaddata\ndef queue_build_image_job(\n instance: RawImageUploadSession=None, created: bool=False,\n *_, **__):\n if created:\n try:\n task = build_images.apply_async(\n args=(instance.pk,),\n countdown=5, # Wait a bit - immediate start won't work!\n )\n instance.session_state = UPLOAD_SESSION_STATE.queued\n instance.processing_task = task.id\n instance.save()\n except Exception as e:\n instance.session_state = UPLOAD_SESSION_STATE.stopped\n instance.error_message = f\"Could not start job: {e}\"\n instance.save()\n raise e\n", "path": "app/grandchallenge/cases/signals.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom typing import List\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\nfrom django.db import transaction\n\nfrom grandchallenge.cases.models import RawImageUploadSession, RawImageFile\nfrom grandchallenge.jqfileupload.filters import reject_duplicate_filenames\nfrom grandchallenge.jqfileupload.widgets import uploader\nfrom grandchallenge.jqfileupload.widgets.uploader import (\n UploadedAjaxFileList, StagedAjaxFile\n)\n\nupload_raw_files_widget = uploader.AjaxUploadWidget(\n ajax_target_path=\"ajax/raw_files/\",\n multifile=True,\n auto_commit=False,\n upload_validators=[\n reject_duplicate_filenames,\n ],\n)\n\n\nclass UploadRawImagesForm(forms.ModelForm):\n files = UploadedAjaxFileList(\n widget=upload_raw_files_widget,\n label=\"Image files\",\n help_text=(\n 'Upload images for creating a new archive'\n ),\n )\n\n def __init__(self, *args, **kwargs):\n super(UploadRawImagesForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.add_input(Submit(\"save\", \"Submit\"))\n\n def save(self, commit=True):\n with transaction.atomic():\n instance = super(UploadRawImagesForm, self).save(commit=commit)\n\n # Create links between the created session and all uploaded files\n uploaded_files = self.cleaned_data[\n \"files\"] # type: List[StagedAjaxFile]\n\n for uploaded_file in uploaded_files:\n RawImageFile.objects.create(\n upload_session=instance,\n filename=uploaded_file.name,\n staged_file_id=uploaded_file.uuid,\n )\n\n return instance\n\n class Meta:\n model = RawImageUploadSession\n fields = ['files']\n", "path": "app/grandchallenge/cases/forms.py"}, {"content": "import shutil\nfrom pathlib import Path\nfrom tempfile import mkdtemp\nfrom typing import Tuple, Sequence\nfrom uuid import UUID\n\nfrom celery import shared_task\nfrom django.db import transaction\n\nfrom grandchallenge.cases.image_builders import ImageBuilderResult\nfrom grandchallenge.cases.image_builders.metaio_mhd_mha import (\n image_builder_mhd\n)\nfrom grandchallenge.cases.log import logger\nfrom grandchallenge.cases.models import (\n RawImageUploadSession, UPLOAD_SESSION_STATE, Image, ImageFile, RawImageFile\n)\nfrom grandchallenge.jqfileupload.widgets.uploader import (\n StagedAjaxFile, NotFoundError\n)\n\n\nclass ProvisioningError(Exception): pass\n\n\ndef populate_provisioning_directory(\n 
raw_files: Sequence[RawImageFile],\n provisioning_dir: Path):\n \"\"\"\n Provisions provisioning_dir with the files associated using the given\n list of RawImageFile objects.\n\n Parameters\n ----------\n raw_files:\n The list of RawImageFile that should be saved in the target\n directory.\n\n provisioning_dir: Path\n The path where to copy the files.\n\n Raises\n ------\n ProvisioningError:\n Raised when not all files could be copied to the provisioning directory.\n \"\"\"\n provisioning_dir = Path(provisioning_dir)\n\n def copy_to_tmpdir(image_file: RawImageFile):\n staged_file = StagedAjaxFile(image_file.staged_file_id)\n if not staged_file.exists:\n raise ValueError(\n f\"staged file {image_file.staged_file_id} does not exist\")\n\n with open(provisioning_dir / staged_file.name, \"wb\") as dest_file:\n with staged_file.open() as src_file:\n BUFFER_SIZE = 0x10000\n first = True\n while first or (len(buffer) >= BUFFER_SIZE):\n first = False\n buffer = src_file.read(BUFFER_SIZE)\n dest_file.write(buffer)\n\n exceptions_raised = 0\n for raw_file in raw_files:\n try:\n copy_to_tmpdir(raw_file)\n except Exception as e:\n logger.exception(\n f\"populate_provisioning_directory exception \"\n f\"for file: '{raw_file.filename}'\")\n exceptions_raised += 1\n\n if exceptions_raised > 0:\n raise ProvisioningError(\n f\"{exceptions_raised} errors occurred during provisioning of the \"\n f\"image construction directory\")\n\n\[email protected]\ndef store_image(image: Image, all_image_files: Sequence[ImageFile]):\n \"\"\"\n Stores an image in the database in a single transaction (or fails\n accordingly). Associated image files are extracted from the\n all_image_files argument and stored together with the image itself\n in a single transaction.\n\n Parameters\n ----------\n image: :class:`Image`\n The image to store. The actual image files that are stored are extracted\n from the second argument.\n\n all_image_files: list of :class:`ImageFile`\n An unordered list of ImageFile objects that might or might not belong\n to the image provided as the first argument. The function automatically\n extracts related images from the all_image_files argument to store\n alongside the given image.\n \"\"\"\n associated_files = [\n _if for _if in all_image_files\n if _if.image == image\n ]\n image.save()\n for af in associated_files:\n af.save()\n\n\nIMAGE_BUILDER_ALGORITHMS = [\n image_builder_mhd\n]\n\n\ndef remove_duplicate_files(session_files: Sequence[RawImageFile]) -> Tuple[\n Sequence[RawImageFile], Sequence[RawImageFile]]:\n \"\"\"\n Filters the given sequence of RawImageFile objects and removes all files\n that have a nun-unqie filename.\n\n Parameters\n ----------\n session_files: Sequence[RawImageFile]\n List of RawImageFile objects thats filenames should be checked for\n uniqueness.\n\n Returns\n -------\n Two Sequence[RawImageFile]. 
The first sequence is the filtered session_files\n list, the second list is a list of duplicates that were removed.\n \"\"\"\n filename_lookup = {}\n duplicates = []\n for file in session_files:\n if file.filename in filename_lookup:\n duplicates.append(file)\n\n looked_up_file = filename_lookup[file.filename]\n if looked_up_file is not None:\n duplicates.append(looked_up_file)\n filename_lookup[file.filename] = None\n else:\n filename_lookup[file.filename] = file\n return (\n tuple(x for x in filename_lookup.values() if x is not None),\n tuple(duplicates)\n )\n\n\n@shared_task(\n autoretry_for=(RawImageUploadSession.DoesNotExist,),\n default_retry_delay=60,\n retry_kwargs={\n 'max_retries': 15\n })\ndef build_images(upload_session_uuid: UUID):\n \"\"\"\n Task which analyzes an upload session and attempts to extract and store\n detected images assembled from files uploaded in the image session.\n\n The task updates the state-filed of the associated\n :class:`RawImageUploadSession` to indicate if it is running or has finished\n computing.\n\n Results are stored in:\n - `RawImageUploadSession.error_message` if a general error occurred during\n processing.\n - The `RawImageFile.error` field of associated `RawImageFile` objects,\n in case files could not be processed.\n\n The operation of building images will delete associated `StagedAjaxFile`s\n of analyzed images in order to free up space on the server (only done if the\n function does not error out).\n\n If a job fails due to a RawImageUploadSession.DoesNotExist error, the\n job is queued for a retry (max 15 times).\n\n Parameters\n ----------\n upload_session_uuid: UUID\n The uuid of the upload sessions that should be analyzed.\n \"\"\"\n upload_session = RawImageUploadSession.objects.get(pk=upload_session_uuid)\n upload_session: RawImageUploadSession\n\n if upload_session.session_state == UPLOAD_SESSION_STATE.queued:\n tmp_dir = Path(mkdtemp(prefix=\"construct_image_volumes-\"))\n try:\n try:\n upload_session.session_state = UPLOAD_SESSION_STATE.running\n upload_session.save()\n\n session_files = RawImageFile.objects.filter(\n upload_session=upload_session.pk).all()\n session_files: Tuple[RawImageFile]\n\n session_files, duplicates = \\\n remove_duplicate_files(session_files)\n for duplicate in duplicates:\n duplicate: RawImageFile\n duplicate.error = \"Filename not unique\"\n saf = StagedAjaxFile(duplicate.staged_file_id)\n duplicate.staged_file_id = None\n saf.delete()\n duplicate.save()\n\n populate_provisioning_directory(session_files, tmp_dir)\n\n filename_lookup = {\n StagedAjaxFile(\n raw_image_file.staged_file_id).name: raw_image_file\n for raw_image_file in session_files\n }\n unconsumed_filenames = set(filename_lookup.keys())\n\n collected_images = []\n collected_associated_files = []\n for algorithm in IMAGE_BUILDER_ALGORITHMS:\n algorithm_result = algorithm(tmp_dir)\n algorithm_result: ImageBuilderResult\n\n collected_images += list(algorithm_result.new_images)\n collected_associated_files += \\\n list(algorithm_result.new_image_files)\n\n for filename in algorithm_result.consumed_files:\n if filename in unconsumed_filenames:\n unconsumed_filenames.remove(filename)\n for filename, msg in algorithm_result.file_errors_map.items():\n if filename in unconsumed_filenames:\n unconsumed_filenames.remove(filename)\n raw_image = filename_lookup[filename]\n raw_file: RawImageFile\n raw_image.error = str(msg)[:256]\n raw_image.save()\n\n for image in collected_images:\n image.origin = upload_session\n store_image(image, 
collected_associated_files)\n for unconsumed_filename in unconsumed_filenames:\n raw_file = filename_lookup[unconsumed_filename]\n raw_file.error = \\\n \"File could not be processed by any image builder\"\n\n # Delete any touched file data\n for file in session_files:\n try:\n saf = StagedAjaxFile(file.staged_file_id)\n file.staged_file_id = None\n saf.delete()\n file.save()\n except NotFoundError:\n pass\n except Exception as e:\n upload_session.error_message = str(e)\n finally:\n if tmp_dir is not None:\n shutil.rmtree(tmp_dir)\n\n upload_session.session_state = UPLOAD_SESSION_STATE.stopped\n upload_session.save()\n", "path": "app/grandchallenge/cases/tasks.py"}]} | 3,918 | 806 |
gh_patches_debug_29106 | rasdani/github-patches | git_diff | apache__airflow-34931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Airflow 2.7.1 can not start Scheduler & trigger
### Apache Airflow version
2.7.1
### What happened
After upgrade from 2.6.0 to 2.7.1 (try pip uninstall apache-airflow, and clear dir airflow - remove airflow.cfg), I can start scheduler & trigger with daemon.
I try start with command, it can start, but logout console it killed.
I try: airflow scheduler or airflow triggerer :done but kill when logout console
airflow scheduler --daemon && airflow triggerer --daemon: fail, can not start scheduler & triggerer (2.6.0 run ok). but start deamon with webserver & celery worker is fine
Help me
### What you think should happen instead
_No response_
### How to reproduce
1. run airflow 2.6.0 fine on ubuntu server 22.04.3 lts
2. install airflow 2.7.1
3. can not start daemon triggerer & scheduler
### Operating System
ubuntu server 22.04.3 LTS
### Versions of Apache Airflow Providers
_No response_
### Deployment
Virtualenv installation
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of airflow/cli/commands/triggerer_command.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 """Triggerer command."""
18 from __future__ import annotations
19
20 import signal
21 from contextlib import contextmanager
22 from functools import partial
23 from multiprocessing import Process
24 from typing import Generator
25
26 import daemon
27 from daemon.pidfile import TimeoutPIDLockFile
28
29 from airflow import settings
30 from airflow.configuration import conf
31 from airflow.jobs.job import Job, run_job
32 from airflow.jobs.triggerer_job_runner import TriggererJobRunner
33 from airflow.utils import cli as cli_utils
34 from airflow.utils.cli import setup_locations, setup_logging, sigint_handler, sigquit_handler
35 from airflow.utils.providers_configuration_loader import providers_configuration_loaded
36 from airflow.utils.serve_logs import serve_logs
37
38
39 @contextmanager
40 def _serve_logs(skip_serve_logs: bool = False) -> Generator[None, None, None]:
41 """Start serve_logs sub-process."""
42 sub_proc = None
43 if skip_serve_logs is False:
44 port = conf.getint("logging", "trigger_log_server_port", fallback=8794)
45 sub_proc = Process(target=partial(serve_logs, port=port))
46 sub_proc.start()
47 try:
48 yield
49 finally:
50 if sub_proc:
51 sub_proc.terminate()
52
53
54 @cli_utils.action_cli
55 @providers_configuration_loaded
56 def triggerer(args):
57 """Start Airflow Triggerer."""
58 settings.MASK_SECRETS_IN_LOGS = True
59 print(settings.HEADER)
60 triggerer_heartrate = conf.getfloat("triggerer", "JOB_HEARTBEAT_SEC")
61 triggerer_job_runner = TriggererJobRunner(job=Job(heartrate=triggerer_heartrate), capacity=args.capacity)
62
63 if args.daemon:
64 pid, stdout, stderr, log_file = setup_locations(
65 "triggerer", args.pid, args.stdout, args.stderr, args.log_file
66 )
67 handle = setup_logging(log_file)
68 with open(stdout, "a") as stdout_handle, open(stderr, "a") as stderr_handle:
69 stdout_handle.truncate(0)
70 stderr_handle.truncate(0)
71
72 daemon_context = daemon.DaemonContext(
73 pidfile=TimeoutPIDLockFile(pid, -1),
74 files_preserve=[handle],
75 stdout=stdout_handle,
76 stderr=stderr_handle,
77 umask=int(settings.DAEMON_UMASK, 8),
78 )
79 with daemon_context, _serve_logs(args.skip_serve_logs):
80 run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)
81 else:
82 signal.signal(signal.SIGINT, sigint_handler)
83 signal.signal(signal.SIGTERM, sigint_handler)
84 signal.signal(signal.SIGQUIT, sigquit_handler)
85 with _serve_logs(args.skip_serve_logs):
86 run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)
87
[end of airflow/cli/commands/triggerer_command.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/cli/commands/triggerer_command.py b/airflow/cli/commands/triggerer_command.py
--- a/airflow/cli/commands/triggerer_command.py
+++ b/airflow/cli/commands/triggerer_command.py
@@ -58,7 +58,6 @@
settings.MASK_SECRETS_IN_LOGS = True
print(settings.HEADER)
triggerer_heartrate = conf.getfloat("triggerer", "JOB_HEARTBEAT_SEC")
- triggerer_job_runner = TriggererJobRunner(job=Job(heartrate=triggerer_heartrate), capacity=args.capacity)
if args.daemon:
pid, stdout, stderr, log_file = setup_locations(
@@ -77,10 +76,16 @@
umask=int(settings.DAEMON_UMASK, 8),
)
with daemon_context, _serve_logs(args.skip_serve_logs):
+ triggerer_job_runner = TriggererJobRunner(
+ job=Job(heartrate=triggerer_heartrate), capacity=args.capacity
+ )
run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)
else:
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
signal.signal(signal.SIGQUIT, sigquit_handler)
with _serve_logs(args.skip_serve_logs):
+ triggerer_job_runner = TriggererJobRunner(
+ job=Job(heartrate=triggerer_heartrate), capacity=args.capacity
+ )
run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)
| {"golden_diff": "diff --git a/airflow/cli/commands/triggerer_command.py b/airflow/cli/commands/triggerer_command.py\n--- a/airflow/cli/commands/triggerer_command.py\n+++ b/airflow/cli/commands/triggerer_command.py\n@@ -58,7 +58,6 @@\n settings.MASK_SECRETS_IN_LOGS = True\n print(settings.HEADER)\n triggerer_heartrate = conf.getfloat(\"triggerer\", \"JOB_HEARTBEAT_SEC\")\n- triggerer_job_runner = TriggererJobRunner(job=Job(heartrate=triggerer_heartrate), capacity=args.capacity)\n \n if args.daemon:\n pid, stdout, stderr, log_file = setup_locations(\n@@ -77,10 +76,16 @@\n umask=int(settings.DAEMON_UMASK, 8),\n )\n with daemon_context, _serve_logs(args.skip_serve_logs):\n+ triggerer_job_runner = TriggererJobRunner(\n+ job=Job(heartrate=triggerer_heartrate), capacity=args.capacity\n+ )\n run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)\n else:\n signal.signal(signal.SIGINT, sigint_handler)\n signal.signal(signal.SIGTERM, sigint_handler)\n signal.signal(signal.SIGQUIT, sigquit_handler)\n with _serve_logs(args.skip_serve_logs):\n+ triggerer_job_runner = TriggererJobRunner(\n+ job=Job(heartrate=triggerer_heartrate), capacity=args.capacity\n+ )\n run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)\n", "issue": "Airflow 2.7.1 can not start Scheduler & trigger\n### Apache Airflow version\n\n2.7.1\n\n### What happened\n\nAfter upgrade from 2.6.0 to 2.7.1 (try pip uninstall apache-airflow, and clear dir airflow - remove airflow.cfg), I can start scheduler & trigger with daemon. \r\nI try start with command, it can start, but logout console it killed.\r\nI try: airflow scheduler or airflow triggerer :done but kill when logout console\r\nairflow scheduler --daemon && airflow triggerer --daemon: fail, can not start scheduler & triggerer (2.6.0 run ok). but start deamon with webserver & celery worker is fine\r\n\r\nHelp me\n\n### What you think should happen instead\n\n_No response_\n\n### How to reproduce\n\n1. run airflow 2.6.0 fine on ubuntu server 22.04.3 lts\r\n2. install airflow 2.7.1 \r\n3. can not start daemon triggerer & scheduler\n\n### Operating System\n\nubuntu server 22.04.3 LTS\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Deployment\n\nVirtualenv installation\n\n### Deployment details\n\n_No response_\n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Triggerer command.\"\"\"\nfrom __future__ import annotations\n\nimport signal\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom multiprocessing import Process\nfrom typing import Generator\n\nimport daemon\nfrom daemon.pidfile import TimeoutPIDLockFile\n\nfrom airflow import settings\nfrom airflow.configuration import conf\nfrom airflow.jobs.job import Job, run_job\nfrom airflow.jobs.triggerer_job_runner import TriggererJobRunner\nfrom airflow.utils import cli as cli_utils\nfrom airflow.utils.cli import setup_locations, setup_logging, sigint_handler, sigquit_handler\nfrom airflow.utils.providers_configuration_loader import providers_configuration_loaded\nfrom airflow.utils.serve_logs import serve_logs\n\n\n@contextmanager\ndef _serve_logs(skip_serve_logs: bool = False) -> Generator[None, None, None]:\n \"\"\"Start serve_logs sub-process.\"\"\"\n sub_proc = None\n if skip_serve_logs is False:\n port = conf.getint(\"logging\", \"trigger_log_server_port\", fallback=8794)\n sub_proc = Process(target=partial(serve_logs, port=port))\n sub_proc.start()\n try:\n yield\n finally:\n if sub_proc:\n sub_proc.terminate()\n\n\n@cli_utils.action_cli\n@providers_configuration_loaded\ndef triggerer(args):\n \"\"\"Start Airflow Triggerer.\"\"\"\n settings.MASK_SECRETS_IN_LOGS = True\n print(settings.HEADER)\n triggerer_heartrate = conf.getfloat(\"triggerer\", \"JOB_HEARTBEAT_SEC\")\n triggerer_job_runner = TriggererJobRunner(job=Job(heartrate=triggerer_heartrate), capacity=args.capacity)\n\n if args.daemon:\n pid, stdout, stderr, log_file = setup_locations(\n \"triggerer\", args.pid, args.stdout, args.stderr, args.log_file\n )\n handle = setup_logging(log_file)\n with open(stdout, \"a\") as stdout_handle, open(stderr, \"a\") as stderr_handle:\n stdout_handle.truncate(0)\n stderr_handle.truncate(0)\n\n daemon_context = daemon.DaemonContext(\n pidfile=TimeoutPIDLockFile(pid, -1),\n files_preserve=[handle],\n stdout=stdout_handle,\n stderr=stderr_handle,\n umask=int(settings.DAEMON_UMASK, 8),\n )\n with daemon_context, _serve_logs(args.skip_serve_logs):\n run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)\n else:\n signal.signal(signal.SIGINT, sigint_handler)\n signal.signal(signal.SIGTERM, sigint_handler)\n signal.signal(signal.SIGQUIT, sigquit_handler)\n with _serve_logs(args.skip_serve_logs):\n run_job(job=triggerer_job_runner.job, execute_callable=triggerer_job_runner._execute)\n", "path": "airflow/cli/commands/triggerer_command.py"}]} | 1,800 | 366 |
gh_patches_debug_4434 | rasdani/github-patches | git_diff | sopel-irc__sopel-948 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
.ip socket.gaierror on Sopel 6.1.1 on python 3.5.0
OS X 11.10
``` python
WARNING:sopel.modules.ip:Downloading GeoIP database
Traceback (most recent call last):
File "/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/sopel/bot.py", line 260, in call
exit_code = func(sopel, trigger)
File "/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/sopel/modules/ip.py", line 118, in ip
response += " | Location: %s" % gi_city.country_name_by_name(query)
File "/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/pygeoip/__init__.py", line 502, in country_name_by_name
addr = self._gethostbyname(hostname)
File "/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/pygeoip/__init__.py", line 394, in _gethostbyname
return socket.gethostbyname(hostname)
socket.gaierror: [Errno 8] nodename nor servname provided, or not known
/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/sopel/bot.py:263: ResourceWarning: unclosed file <_io.BufferedReader name='/Users/thill/.sopel/GeoLiteCity.dat'>
self.error(trigger)
/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/sopel/bot.py:263: ResourceWarning: unclosed file <_io.BufferedReader name='/Users/thill/.sopel/GeoIPASNum.dat'>
self.error(trigger)
```
</issue>
<code>
[start of sopel/modules/ip.py]
1 # coding=utf-8
2 """GeoIP lookup module"""
3 # Copyright 2011, Dimitri Molenaars, TyRope.nl,
4 # Copyright © 2013, Elad Alfassa <[email protected]>
5 # Licensed under the Eiffel Forum License 2.
6
7 from __future__ import unicode_literals, absolute_import, print_function, division
8
9 import pygeoip
10 import socket
11 import os
12 import gzip
13
14 urlretrieve = None
15 try:
16 from urllib import urlretrieve
17 except ImportError:
18 try:
19 # urlretrieve has been put under urllib.request in Python 3.
20 # It's also deprecated so this should probably be replaced with
21 # urllib2.
22 from urllib.request import urlretrieve
23 except ImportError:
24 pass
25
26 from sopel.config.types import StaticSection, FilenameAttribute
27 from sopel.module import commands, example
28 from sopel.logger import get_logger
29
30 LOGGER = get_logger(__name__)
31
32
33 class GeoipSection(StaticSection):
34 GeoIP_db_path = FilenameAttribute('GeoIP_db_path', directory=True)
35 """Path of the directory containing the GeoIP db files."""
36
37
38 def configure(config):
39 config.define_section('ip', GeoipSection)
40 config.ip.configure_setting('GeoIP_db_path',
41 'Path of the GeoIP db files')
42
43
44 def setup(bot=None):
45 if not bot:
46 return # Because of some weird pytest thing?
47
48 bot.config.define_section('ip', GeoipSection)
49
50
51 def _decompress(source, target, delete_after_decompression=True):
52 """ Decompress a GZip file """
53 f_in = gzip.open(source, 'rb')
54 f_out = open(target, 'wb')
55 f_out.writelines(f_in)
56 f_out.close()
57 f_in.close()
58 if delete_after_decompression:
59 os.remove(source)
60
61
62 def _find_geoip_db(bot):
63 """ Find the GeoIP database """
64 config = bot.config
65 if config.ip.GeoIP_db_path:
66 cities_db = os.path.join(config.ip.GeoIP_db_path, 'GeoLiteCity.dat')
67 ipasnum_db = os.path.join(config.ip.GeoIP_db_path, 'GeoIPASNum.dat')
68 if os.path.isfile(cities_db) and os.path.isfile(ipasnum_db):
69 return config.ip.GeoIP_db_path
70 else:
71 LOGGER.warning(
72 'GeoIP path configured but DB not found in configured path'
73 )
74 if (os.path.isfile(os.path.join(bot.config.core.homedir, 'GeoLiteCity.dat')) and
75 os.path.isfile(os.path.join(bot.config.core.homedir, 'GeoIPASNum.dat'))):
76 return bot.config.core.homedir
77 elif (os.path.isfile(os.path.join('/usr/share/GeoIP', 'GeoLiteCity.dat')) and
78 os.path.isfile(os.path.join('/usr/share/GeoIP', 'GeoIPASNum.dat'))):
79 return '/usr/share/GeoIP'
80 elif urlretrieve:
81 LOGGER.warning('Downloading GeoIP database')
82 bot.say('Downloading GeoIP database, please wait...')
83 geolite_city_url = 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz'
84 geolite_ASN_url = 'http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum.dat.gz'
85 geolite_city_filepath = os.path.join(bot.config.core.homedir, 'GeoLiteCity.dat.gz')
86 geolite_ASN_filepath = os.path.join(bot.config.core.homedir, 'GeoIPASNum.dat.gz')
87 urlretrieve(geolite_city_url, geolite_city_filepath)
88 urlretrieve(geolite_ASN_url, geolite_ASN_filepath)
89 _decompress(geolite_city_filepath, geolite_city_filepath[:-3])
90 _decompress(geolite_ASN_filepath, geolite_ASN_filepath[:-3])
91 return bot.config.core.homedir
92 else:
93 return False
94
95
96 @commands('iplookup', 'ip')
97 @example('.ip 8.8.8.8',
98 r'[IP/Host Lookup] Hostname: google-public-dns-a.google.com | Location: United States | Region: CA | ISP: AS15169 Google Inc.',
99 re=True,
100 ignore='Downloading GeoIP database, please wait...')
101 def ip(bot, trigger):
102 """IP Lookup tool"""
103 if not trigger.group(2):
104 return bot.reply("No search term.")
105 query = trigger.group(2)
106 db_path = _find_geoip_db(bot)
107 if db_path is False:
108 LOGGER.error('Can\'t find (or download) usable GeoIP database')
109 bot.say('Sorry, I don\'t have a GeoIP database to use for this lookup')
110 return False
111 geolite_city_filepath = os.path.join(_find_geoip_db(bot), 'GeoLiteCity.dat')
112 geolite_ASN_filepath = os.path.join(_find_geoip_db(bot), 'GeoIPASNum.dat')
113 gi_city = pygeoip.GeoIP(geolite_city_filepath)
114 gi_org = pygeoip.GeoIP(geolite_ASN_filepath)
115 host = socket.getfqdn(query)
116 response = "[IP/Host Lookup] Hostname: %s" % host
117 try:
118 response += " | Location: %s" % gi_city.country_name_by_name(query)
119 except AttributeError:
120 response += ' | Location: Unknown'
121
122 region_data = gi_city.region_by_name(query)
123 try:
124 region = region_data['region_code'] # pygeoip >= 0.3.0
125 except KeyError:
126 region = region_data['region_name'] # pygeoip < 0.3.0
127 if region:
128 response += " | Region: %s" % region
129
130 isp = gi_org.org_by_name(query)
131 response += " | ISP: %s" % isp
132 bot.say(response)
133
134
135 if __name__ == "__main__":
136 from sopel.test_tools import run_example_tests
137 run_example_tests(__file__)
138
[end of sopel/modules/ip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/ip.py b/sopel/modules/ip.py
--- a/sopel/modules/ip.py
+++ b/sopel/modules/ip.py
@@ -118,6 +118,8 @@
response += " | Location: %s" % gi_city.country_name_by_name(query)
except AttributeError:
response += ' | Location: Unknown'
+ except socket.gaierror:
+ return bot.say('[IP/Host Lookup] Unable to resolve IP/Hostname')
region_data = gi_city.region_by_name(query)
try:
| {"golden_diff": "diff --git a/sopel/modules/ip.py b/sopel/modules/ip.py\n--- a/sopel/modules/ip.py\n+++ b/sopel/modules/ip.py\n@@ -118,6 +118,8 @@\n response += \" | Location: %s\" % gi_city.country_name_by_name(query)\n except AttributeError:\n response += ' | Location: Unknown'\n+ except socket.gaierror:\n+ return bot.say('[IP/Host Lookup] Unable to resolve IP/Hostname')\n \n region_data = gi_city.region_by_name(query)\n try:\n", "issue": ".ip socket.gaierror on Sopel 6.1.1 on python 3.5.0\nOS X 11.10\n\n``` python\nWARNING:sopel.modules.ip:Downloading GeoIP database\nTraceback (most recent call last):\n File \"/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/sopel/bot.py\", line 260, in call\n exit_code = func(sopel, trigger)\n File \"/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/sopel/modules/ip.py\", line 118, in ip\n response += \" | Location: %s\" % gi_city.country_name_by_name(query)\n File \"/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/pygeoip/__init__.py\", line 502, in country_name_by_name\n addr = self._gethostbyname(hostname)\n File \"/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/pygeoip/__init__.py\", line 394, in _gethostbyname\n return socket.gethostbyname(hostname)\nsocket.gaierror: [Errno 8] nodename nor servname provided, or not known\n\n/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/sopel/bot.py:263: ResourceWarning: unclosed file <_io.BufferedReader name='/Users/thill/.sopel/GeoLiteCity.dat'>\n self.error(trigger)\n/Users/thill/projects/python/orcabot/venv_orcabot/lib/python3.5/site-packages/sopel/bot.py:263: ResourceWarning: unclosed file <_io.BufferedReader name='/Users/thill/.sopel/GeoIPASNum.dat'>\n self.error(trigger)\n```\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"GeoIP lookup module\"\"\"\n# Copyright 2011, Dimitri Molenaars, TyRope.nl,\n# Copyright \u00a9 2013, Elad Alfassa <[email protected]>\n# Licensed under the Eiffel Forum License 2.\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport pygeoip\nimport socket\nimport os\nimport gzip\n\nurlretrieve = None\ntry:\n from urllib import urlretrieve\nexcept ImportError:\n try:\n # urlretrieve has been put under urllib.request in Python 3.\n # It's also deprecated so this should probably be replaced with\n # urllib2.\n from urllib.request import urlretrieve\n except ImportError:\n pass\n\nfrom sopel.config.types import StaticSection, FilenameAttribute\nfrom sopel.module import commands, example\nfrom sopel.logger import get_logger\n\nLOGGER = get_logger(__name__)\n\n\nclass GeoipSection(StaticSection):\n GeoIP_db_path = FilenameAttribute('GeoIP_db_path', directory=True)\n \"\"\"Path of the directory containing the GeoIP db files.\"\"\"\n\n\ndef configure(config):\n config.define_section('ip', GeoipSection)\n config.ip.configure_setting('GeoIP_db_path',\n 'Path of the GeoIP db files')\n\n\ndef setup(bot=None):\n if not bot:\n return # Because of some weird pytest thing?\n\n bot.config.define_section('ip', GeoipSection)\n\n\ndef _decompress(source, target, delete_after_decompression=True):\n \"\"\" Decompress a GZip file \"\"\"\n f_in = gzip.open(source, 'rb')\n f_out = open(target, 'wb')\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()\n if delete_after_decompression:\n os.remove(source)\n\n\ndef _find_geoip_db(bot):\n \"\"\" Find the GeoIP database \"\"\"\n config = bot.config\n if 
config.ip.GeoIP_db_path:\n cities_db = os.path.join(config.ip.GeoIP_db_path, 'GeoLiteCity.dat')\n ipasnum_db = os.path.join(config.ip.GeoIP_db_path, 'GeoIPASNum.dat')\n if os.path.isfile(cities_db) and os.path.isfile(ipasnum_db):\n return config.ip.GeoIP_db_path\n else:\n LOGGER.warning(\n 'GeoIP path configured but DB not found in configured path'\n )\n if (os.path.isfile(os.path.join(bot.config.core.homedir, 'GeoLiteCity.dat')) and\n os.path.isfile(os.path.join(bot.config.core.homedir, 'GeoIPASNum.dat'))):\n return bot.config.core.homedir\n elif (os.path.isfile(os.path.join('/usr/share/GeoIP', 'GeoLiteCity.dat')) and\n os.path.isfile(os.path.join('/usr/share/GeoIP', 'GeoIPASNum.dat'))):\n return '/usr/share/GeoIP'\n elif urlretrieve:\n LOGGER.warning('Downloading GeoIP database')\n bot.say('Downloading GeoIP database, please wait...')\n geolite_city_url = 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz'\n geolite_ASN_url = 'http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum.dat.gz'\n geolite_city_filepath = os.path.join(bot.config.core.homedir, 'GeoLiteCity.dat.gz')\n geolite_ASN_filepath = os.path.join(bot.config.core.homedir, 'GeoIPASNum.dat.gz')\n urlretrieve(geolite_city_url, geolite_city_filepath)\n urlretrieve(geolite_ASN_url, geolite_ASN_filepath)\n _decompress(geolite_city_filepath, geolite_city_filepath[:-3])\n _decompress(geolite_ASN_filepath, geolite_ASN_filepath[:-3])\n return bot.config.core.homedir\n else:\n return False\n\n\n@commands('iplookup', 'ip')\n@example('.ip 8.8.8.8',\n r'[IP/Host Lookup] Hostname: google-public-dns-a.google.com | Location: United States | Region: CA | ISP: AS15169 Google Inc.',\n re=True,\n ignore='Downloading GeoIP database, please wait...')\ndef ip(bot, trigger):\n \"\"\"IP Lookup tool\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No search term.\")\n query = trigger.group(2)\n db_path = _find_geoip_db(bot)\n if db_path is False:\n LOGGER.error('Can\\'t find (or download) usable GeoIP database')\n bot.say('Sorry, I don\\'t have a GeoIP database to use for this lookup')\n return False\n geolite_city_filepath = os.path.join(_find_geoip_db(bot), 'GeoLiteCity.dat')\n geolite_ASN_filepath = os.path.join(_find_geoip_db(bot), 'GeoIPASNum.dat')\n gi_city = pygeoip.GeoIP(geolite_city_filepath)\n gi_org = pygeoip.GeoIP(geolite_ASN_filepath)\n host = socket.getfqdn(query)\n response = \"[IP/Host Lookup] Hostname: %s\" % host\n try:\n response += \" | Location: %s\" % gi_city.country_name_by_name(query)\n except AttributeError:\n response += ' | Location: Unknown'\n\n region_data = gi_city.region_by_name(query)\n try:\n region = region_data['region_code'] # pygeoip >= 0.3.0\n except KeyError:\n region = region_data['region_name'] # pygeoip < 0.3.0\n if region:\n response += \" | Region: %s\" % region\n\n isp = gi_org.org_by_name(query)\n response += \" | ISP: %s\" % isp\n bot.say(response)\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/ip.py"}]} | 2,579 | 123 |
gh_patches_debug_35685 | rasdani/github-patches | git_diff | litestar-org__litestar-1011 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Custom data types with validators cannot be serialized since 1.45
**Describe the bug**
Since version 1.45 custom data types with pydantic validators cannot be serialized anymore. Deserialization works fine.
**To Reproduce**
Example app that works in 1.44, but doesn't in 1.45+:
Note that the print gets executed.
```python
from pydantic import BaseModel
from starlite import Starlite, post, Request, Response, LoggingConfig
from starlite.utils import create_exception_response
class CustomType(str):
@classmethod
def __get_validators__(cls):
yield cls.get_validators_must_yield_something_so_here_we_are
@classmethod
def get_validators_must_yield_something_so_here_we_are(cls, value):
return cls(value)
class Model(BaseModel):
foo: CustomType
@post('/foo')
def foo(data: Model) -> Model:
print(data)
return data
def logging_exception_handler(request: Request, exc: Exception) -> Response:
request.logger.error("Application Exception", exc_info = exc)
return create_exception_response(exc)
app = Starlite(
[foo],
logging_config = LoggingConfig(),
exception_handlers={Exception: logging_exception_handler},
)
```
**Additional context**
This is the stacktrace that I got with the example above:
```
Traceback (most recent call last):
File ".../python3.11/site-packages/starlite/response/base.py", line 216, in render
return encode_json(content, self.serializer)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".../python3.11/site-packages/starlite/utils/serialization.py", line 59, in encode_json
return _msgspec_json_encoder.encode(obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".../python3.11/site-packages/starlite/utils/serialization.py", line 24, in default_serializer
raise TypeError(f"Unsupported type: {type(value)!r}")
TypeError: Unsupported type: <class 'app2.CustomType'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File ".../python3.11/site-packages/starlite/middleware/exceptions/middleware.py", line 47, in __call__
await self.app(scope, receive, send)
File ".../python3.11/site-packages/starlite/routes/http.py", line 75, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".../python3.11/site-packages/starlite/routes/http.py", line 127, in _get_response_for_request
response = await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".../python3.11/site-packages/starlite/routes/http.py", line 163, in _call_handler_function
else await route_handler.to_response(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".../python3.11/site-packages/starlite/handlers/http.py", line 606, in to_response
return await response_handler(app=app, data=data, plugins=plugins, request=request) # type: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".../python3.11/site-packages/starlite/handlers/http.py", line 203, in handler
response = response_class(
^^^^^^^^^^^^^^^
File ".../python3.11/site-packages/starlite/response/base.py", line 109, in __init__
self.body = content if isinstance(content, bytes) else self.render(content)
^^^^^^^^^^^^^^^^^^^^
File ".../python3.11/site-packages/starlite/response/base.py", line 218, in render
raise ImproperlyConfiguredException("Unable to serialize response content") from e
starlite.exceptions.http_exceptions.ImproperlyConfiguredException: 500: Unable to serialize response content
```
</issue>
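The failure comes from the MRO walk in `default_serializer` below: `CustomType.__mro__` is `(CustomType, str, object)`, and the default encoder map covers pydantic's constrained types but has no entry for plain `str`, so the loop falls through to the `TypeError`. A minimal sketch of that mechanism and of a base-class fallback (the encoder map here is a stand-in, not Starlite's real `DEFAULT_TYPE_ENCODERS`):

```python
class CustomType(str):
    """A str subclass like the one in the report."""

def serialize(value, encoders):
    # mirrors default_serializer: try each class in the MRO except `object`
    for base in type(value).__mro__[:-1]:
        if base in encoders:
            return encoders[base](value)
    raise TypeError(f"Unsupported type: {type(value)!r}")

value = CustomType("hello")

try:
    serialize(value, {})                 # nothing matches -> TypeError, as in the traceback
except TypeError as exc:
    print(exc)                           # prints the same "Unsupported type" message

print(serialize(value, {str: str}))      # a plain-`str` fallback makes the MRO walk succeed
```

Registering the stdlib base classes as identity encoders is one way to let arbitrary subclasses fall back to their base type.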
<code>
[start of starlite/utils/serialization.py]
1 from pathlib import PurePosixPath
2 from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union
3
4 import msgspec
5 from pydantic import (
6 AnyUrl,
7 BaseModel,
8 ByteSize,
9 ConstrainedBytes,
10 ConstrainedDate,
11 ConstrainedDecimal,
12 ConstrainedFloat,
13 ConstrainedFrozenSet,
14 ConstrainedInt,
15 ConstrainedList,
16 ConstrainedSet,
17 ConstrainedStr,
18 EmailStr,
19 NameEmail,
20 PaymentCardNumber,
21 SecretField,
22 StrictBool,
23 )
24 from pydantic.color import Color
25
26 if TYPE_CHECKING:
27 from starlite.types import TypeEncodersMap
28
29 DEFAULT_TYPE_ENCODERS: "TypeEncodersMap" = {
30 PurePosixPath: str,
31 # pydantic specific types
32 BaseModel: lambda m: m.dict(),
33 ByteSize: lambda b: b.real,
34 EmailStr: str,
35 NameEmail: str,
36 Color: str,
37 AnyUrl: str,
38 SecretField: str,
39 ConstrainedInt: int,
40 ConstrainedFloat: float,
41 ConstrainedStr: str,
42 ConstrainedBytes: lambda b: b.decode("utf-8"),
43 ConstrainedList: list,
44 ConstrainedSet: set,
45 ConstrainedFrozenSet: frozenset,
46 ConstrainedDecimal: float,
47 ConstrainedDate: lambda d: d.isoformat(),
48 PaymentCardNumber: str,
49 StrictBool: int, # pydantic compatibility
50 }
51
52
53 def default_serializer(value: Any, type_encoders: Optional[Dict[Any, Callable[[Any], Any]]] = None) -> Any:
54 """Transform values non-natively supported by `msgspec`
55
56 Args:
57 value: A value to serialize#
58 type_encoders: Mapping of types to callables to transforming types
59 Returns:
60 A serialized value
61 Raises:
62 TypeError: if value is not supported
63 """
64 if type_encoders is None:
65 type_encoders = DEFAULT_TYPE_ENCODERS
66 for base in value.__class__.__mro__[:-1]:
67 try:
68 encoder = type_encoders[base]
69 except KeyError:
70 continue
71 return encoder(value)
72 raise TypeError(f"Unsupported type: {type(value)!r}")
73
74
75 def dec_hook(type_: Any, value: Any) -> Any: # pragma: no cover
76 """Transform values non-natively supported by `msgspec`
77
78 Args:
79 type_: Encountered type
80 value: Value to coerce
81
82 Returns:
83 A `msgspec`-supported type
84 """
85 if issubclass(type_, BaseModel):
86 return type_(**value)
87 raise TypeError(f"Unsupported type: {type(value)!r}")
88
89
90 _msgspec_json_encoder = msgspec.json.Encoder(enc_hook=default_serializer)
91 _msgspec_json_decoder = msgspec.json.Decoder(dec_hook=dec_hook)
92 _msgspec_msgpack_encoder = msgspec.msgpack.Encoder(enc_hook=default_serializer)
93 _msgspec_msgpack_decoder = msgspec.msgpack.Decoder(dec_hook=dec_hook)
94
95
96 def encode_json(obj: Any, default: Optional[Callable[[Any], Any]] = default_serializer) -> bytes:
97 """Encode a value into JSON.
98
99 Args:
100 obj: Value to encode
101 default: Optional callable to support non-natively supported types.
102
103 Returns:
104 JSON as bytes
105 """
106 if default is None or default is default_serializer:
107 return _msgspec_json_encoder.encode(obj)
108 return msgspec.json.encode(obj, enc_hook=default)
109
110
111 def decode_json(raw: Union[str, bytes]) -> Any:
112 """Decode a JSON string/bytes into an object.
113
114 Args:
115 raw: Value to decode
116
117 Returns:
118 An object
119 """
120 return _msgspec_json_decoder.decode(raw)
121
122
123 def encode_msgpack(obj: Any, enc_hook: Optional[Callable[[Any], Any]] = default_serializer) -> bytes:
124 """Encode a value into MessagePack.
125
126 Args:
127 obj: Value to encode
128 enc_hook: Optional callable to support non-natively supported types
129
130 Returns:
131 MessagePack as bytes
132 """
133 if enc_hook is None or enc_hook is default_serializer:
134 return _msgspec_msgpack_encoder.encode(obj)
135 return msgspec.msgpack.encode(obj, enc_hook=enc_hook)
136
137
138 def decode_msgpack(raw: bytes) -> Any:
139 """Decode a MessagePack string/bytes into an object.
140
141 Args:
142 raw: Value to decode
143
144 Returns:
145 An object
146 """
147 return _msgspec_msgpack_decoder.decode(raw)
148
[end of starlite/utils/serialization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlite/utils/serialization.py b/starlite/utils/serialization.py
--- a/starlite/utils/serialization.py
+++ b/starlite/utils/serialization.py
@@ -1,27 +1,30 @@
+from collections import deque
+from decimal import Decimal
+from ipaddress import (
+ IPv4Address,
+ IPv4Interface,
+ IPv4Network,
+ IPv6Address,
+ IPv6Interface,
+ IPv6Network,
+)
from pathlib import PurePosixPath
+from re import Pattern
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union
import msgspec
from pydantic import (
- AnyUrl,
BaseModel,
ByteSize,
ConstrainedBytes,
ConstrainedDate,
ConstrainedDecimal,
- ConstrainedFloat,
- ConstrainedFrozenSet,
- ConstrainedInt,
- ConstrainedList,
- ConstrainedSet,
- ConstrainedStr,
- EmailStr,
NameEmail,
- PaymentCardNumber,
SecretField,
StrictBool,
)
from pydantic.color import Color
+from pydantic.json import decimal_encoder
if TYPE_CHECKING:
from starlite.types import TypeEncodersMap
@@ -31,22 +34,36 @@
# pydantic specific types
BaseModel: lambda m: m.dict(),
ByteSize: lambda b: b.real,
- EmailStr: str,
NameEmail: str,
Color: str,
- AnyUrl: str,
SecretField: str,
- ConstrainedInt: int,
- ConstrainedFloat: float,
- ConstrainedStr: str,
ConstrainedBytes: lambda b: b.decode("utf-8"),
- ConstrainedList: list,
- ConstrainedSet: set,
- ConstrainedFrozenSet: frozenset,
ConstrainedDecimal: float,
ConstrainedDate: lambda d: d.isoformat(),
- PaymentCardNumber: str,
- StrictBool: int, # pydantic compatibility
+ IPv4Address: str,
+ IPv4Interface: str,
+ IPv4Network: str,
+ IPv6Address: str,
+ IPv6Interface: str,
+ IPv6Network: str,
+ # pydantic compatibility
+ deque: list,
+ Decimal: decimal_encoder,
+ StrictBool: int,
+ Pattern: lambda o: o.pattern,
+ # support subclasses of stdlib types, e.g. pydantic's constrained types. If no
+ # previous type matched, these will be the last type in the mro, so we use this to
+ # (attempt to) convert a subclass into its base class.
+ # see https://github.com/jcrist/msgspec/issues/248
+ # and https://github.com/starlite-api/starlite/issues/1003
+ str: str,
+ int: int,
+ float: float,
+ list: list,
+ tuple: tuple,
+ set: set,
+ frozenset: frozenset,
+ dict: dict,
}
| {"golden_diff": "diff --git a/starlite/utils/serialization.py b/starlite/utils/serialization.py\n--- a/starlite/utils/serialization.py\n+++ b/starlite/utils/serialization.py\n@@ -1,27 +1,30 @@\n+from collections import deque\n+from decimal import Decimal\n+from ipaddress import (\n+ IPv4Address,\n+ IPv4Interface,\n+ IPv4Network,\n+ IPv6Address,\n+ IPv6Interface,\n+ IPv6Network,\n+)\n from pathlib import PurePosixPath\n+from re import Pattern\n from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union\n \n import msgspec\n from pydantic import (\n- AnyUrl,\n BaseModel,\n ByteSize,\n ConstrainedBytes,\n ConstrainedDate,\n ConstrainedDecimal,\n- ConstrainedFloat,\n- ConstrainedFrozenSet,\n- ConstrainedInt,\n- ConstrainedList,\n- ConstrainedSet,\n- ConstrainedStr,\n- EmailStr,\n NameEmail,\n- PaymentCardNumber,\n SecretField,\n StrictBool,\n )\n from pydantic.color import Color\n+from pydantic.json import decimal_encoder\n \n if TYPE_CHECKING:\n from starlite.types import TypeEncodersMap\n@@ -31,22 +34,36 @@\n # pydantic specific types\n BaseModel: lambda m: m.dict(),\n ByteSize: lambda b: b.real,\n- EmailStr: str,\n NameEmail: str,\n Color: str,\n- AnyUrl: str,\n SecretField: str,\n- ConstrainedInt: int,\n- ConstrainedFloat: float,\n- ConstrainedStr: str,\n ConstrainedBytes: lambda b: b.decode(\"utf-8\"),\n- ConstrainedList: list,\n- ConstrainedSet: set,\n- ConstrainedFrozenSet: frozenset,\n ConstrainedDecimal: float,\n ConstrainedDate: lambda d: d.isoformat(),\n- PaymentCardNumber: str,\n- StrictBool: int, # pydantic compatibility\n+ IPv4Address: str,\n+ IPv4Interface: str,\n+ IPv4Network: str,\n+ IPv6Address: str,\n+ IPv6Interface: str,\n+ IPv6Network: str,\n+ # pydantic compatibility\n+ deque: list,\n+ Decimal: decimal_encoder,\n+ StrictBool: int,\n+ Pattern: lambda o: o.pattern,\n+ # support subclasses of stdlib types, e.g. pydantic's constrained types. If no\n+ # previous type matched, these will be the last type in the mro, so we use this to\n+ # (attempt to) convert a subclass into its base class.\n+ # see https://github.com/jcrist/msgspec/issues/248\n+ # and https://github.com/starlite-api/starlite/issues/1003\n+ str: str,\n+ int: int,\n+ float: float,\n+ list: list,\n+ tuple: tuple,\n+ set: set,\n+ frozenset: frozenset,\n+ dict: dict,\n }\n", "issue": "Bug: Custom data types with validators cannot be serialized since 1.45\n**Describe the bug**\r\nSince version 1.45 custom data types with pydantic validators cannot be serialized anymore. 
Deserialization works fine.\r\n\r\n**To Reproduce**\r\nExample app that works in 1.44, but doesn't in 1.45+:\r\nNote that the print gets executed.\r\n```python\r\nfrom pydantic import BaseModel\r\nfrom starlite import Starlite, post, Request, Response, LoggingConfig\r\nfrom starlite.utils import create_exception_response\r\n\r\n\r\nclass CustomType(str):\r\n @classmethod\r\n def __get_validators__(cls):\r\n yield cls.get_validators_must_yield_something_so_here_we_are\r\n\r\n @classmethod\r\n def get_validators_must_yield_something_so_here_we_are(cls, value):\r\n return cls(value)\r\n\r\n\r\nclass Model(BaseModel):\r\n foo: CustomType\r\n\r\n\r\n@post('/foo')\r\ndef foo(data: Model) -> Model:\r\n print(data)\r\n return data\r\n\r\n\r\ndef logging_exception_handler(request: Request, exc: Exception) -> Response:\r\n request.logger.error(\"Application Exception\", exc_info = exc)\r\n return create_exception_response(exc)\r\n\r\n\r\napp = Starlite(\r\n [foo],\r\n logging_config = LoggingConfig(),\r\n exception_handlers={Exception: logging_exception_handler},\r\n)\r\n```\r\n\r\n**Additional context**\r\nThis is the stacktrace that I got with the example above:\r\n```\r\nTraceback (most recent call last):\r\n File \".../python3.11/site-packages/starlite/response/base.py\", line 216, in render\r\n return encode_json(content, self.serializer)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \".../python3.11/site-packages/starlite/utils/serialization.py\", line 59, in encode_json\r\n return _msgspec_json_encoder.encode(obj)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \".../python3.11/site-packages/starlite/utils/serialization.py\", line 24, in default_serializer\r\n raise TypeError(f\"Unsupported type: {type(value)!r}\")\r\nTypeError: Unsupported type: <class 'app2.CustomType'>\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \".../python3.11/site-packages/starlite/middleware/exceptions/middleware.py\", line 47, in __call__\r\n await self.app(scope, receive, send)\r\n File \".../python3.11/site-packages/starlite/routes/http.py\", line 75, in handle\r\n response = await self._get_response_for_request(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \".../python3.11/site-packages/starlite/routes/http.py\", line 127, in _get_response_for_request\r\n response = await self._call_handler_function(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \".../python3.11/site-packages/starlite/routes/http.py\", line 163, in _call_handler_function\r\n else await route_handler.to_response(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \".../python3.11/site-packages/starlite/handlers/http.py\", line 606, in to_response\r\n return await response_handler(app=app, data=data, plugins=plugins, request=request) # type: ignore\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \".../python3.11/site-packages/starlite/handlers/http.py\", line 203, in handler\r\n response = response_class(\r\n ^^^^^^^^^^^^^^^\r\n File \".../python3.11/site-packages/starlite/response/base.py\", line 109, in __init__\r\n self.body = content if isinstance(content, bytes) else self.render(content)\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \".../python3.11/site-packages/starlite/response/base.py\", line 218, in render\r\n raise ImproperlyConfiguredException(\"Unable to serialize response content\") from e\r\nstarlite.exceptions.http_exceptions.ImproperlyConfiguredException: 500: Unable to serialize response content\r\n```\r\n\n", 
"before_files": [{"content": "from pathlib import PurePosixPath\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union\n\nimport msgspec\nfrom pydantic import (\n AnyUrl,\n BaseModel,\n ByteSize,\n ConstrainedBytes,\n ConstrainedDate,\n ConstrainedDecimal,\n ConstrainedFloat,\n ConstrainedFrozenSet,\n ConstrainedInt,\n ConstrainedList,\n ConstrainedSet,\n ConstrainedStr,\n EmailStr,\n NameEmail,\n PaymentCardNumber,\n SecretField,\n StrictBool,\n)\nfrom pydantic.color import Color\n\nif TYPE_CHECKING:\n from starlite.types import TypeEncodersMap\n\nDEFAULT_TYPE_ENCODERS: \"TypeEncodersMap\" = {\n PurePosixPath: str,\n # pydantic specific types\n BaseModel: lambda m: m.dict(),\n ByteSize: lambda b: b.real,\n EmailStr: str,\n NameEmail: str,\n Color: str,\n AnyUrl: str,\n SecretField: str,\n ConstrainedInt: int,\n ConstrainedFloat: float,\n ConstrainedStr: str,\n ConstrainedBytes: lambda b: b.decode(\"utf-8\"),\n ConstrainedList: list,\n ConstrainedSet: set,\n ConstrainedFrozenSet: frozenset,\n ConstrainedDecimal: float,\n ConstrainedDate: lambda d: d.isoformat(),\n PaymentCardNumber: str,\n StrictBool: int, # pydantic compatibility\n}\n\n\ndef default_serializer(value: Any, type_encoders: Optional[Dict[Any, Callable[[Any], Any]]] = None) -> Any:\n \"\"\"Transform values non-natively supported by `msgspec`\n\n Args:\n value: A value to serialize#\n type_encoders: Mapping of types to callables to transforming types\n Returns:\n A serialized value\n Raises:\n TypeError: if value is not supported\n \"\"\"\n if type_encoders is None:\n type_encoders = DEFAULT_TYPE_ENCODERS\n for base in value.__class__.__mro__[:-1]:\n try:\n encoder = type_encoders[base]\n except KeyError:\n continue\n return encoder(value)\n raise TypeError(f\"Unsupported type: {type(value)!r}\")\n\n\ndef dec_hook(type_: Any, value: Any) -> Any: # pragma: no cover\n \"\"\"Transform values non-natively supported by `msgspec`\n\n Args:\n type_: Encountered type\n value: Value to coerce\n\n Returns:\n A `msgspec`-supported type\n \"\"\"\n if issubclass(type_, BaseModel):\n return type_(**value)\n raise TypeError(f\"Unsupported type: {type(value)!r}\")\n\n\n_msgspec_json_encoder = msgspec.json.Encoder(enc_hook=default_serializer)\n_msgspec_json_decoder = msgspec.json.Decoder(dec_hook=dec_hook)\n_msgspec_msgpack_encoder = msgspec.msgpack.Encoder(enc_hook=default_serializer)\n_msgspec_msgpack_decoder = msgspec.msgpack.Decoder(dec_hook=dec_hook)\n\n\ndef encode_json(obj: Any, default: Optional[Callable[[Any], Any]] = default_serializer) -> bytes:\n \"\"\"Encode a value into JSON.\n\n Args:\n obj: Value to encode\n default: Optional callable to support non-natively supported types.\n\n Returns:\n JSON as bytes\n \"\"\"\n if default is None or default is default_serializer:\n return _msgspec_json_encoder.encode(obj)\n return msgspec.json.encode(obj, enc_hook=default)\n\n\ndef decode_json(raw: Union[str, bytes]) -> Any:\n \"\"\"Decode a JSON string/bytes into an object.\n\n Args:\n raw: Value to decode\n\n Returns:\n An object\n \"\"\"\n return _msgspec_json_decoder.decode(raw)\n\n\ndef encode_msgpack(obj: Any, enc_hook: Optional[Callable[[Any], Any]] = default_serializer) -> bytes:\n \"\"\"Encode a value into MessagePack.\n\n Args:\n obj: Value to encode\n enc_hook: Optional callable to support non-natively supported types\n\n Returns:\n MessagePack as bytes\n \"\"\"\n if enc_hook is None or enc_hook is default_serializer:\n return _msgspec_msgpack_encoder.encode(obj)\n return msgspec.msgpack.encode(obj, 
enc_hook=enc_hook)\n\n\ndef decode_msgpack(raw: bytes) -> Any:\n \"\"\"Decode a MessagePack string/bytes into an object.\n\n Args:\n raw: Value to decode\n\n Returns:\n An object\n \"\"\"\n return _msgspec_msgpack_decoder.decode(raw)\n", "path": "starlite/utils/serialization.py"}]} | 2,757 | 686 |
gh_patches_debug_7478 | rasdani/github-patches | git_diff | facebookresearch__xformers-180 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[feat] Make the batch dimension broadcastable
# 🚀 Feature
Support different key/query/value batch sizes, provided each is either 1 or the same shared value
## Motivation
(popped up when working on [salina](https://github.com/facebookresearch/salina)) PyTorch MHA handles key/query/value batch sizes from 1 to B and broadcasts when needed. We don't do that and hard-crash instead, so users have to align the dimensions from the outside.
## Pitch
<!-- A clear and concise description of what you want to happen. -->
## Alternatives
<!-- A clear and concise description of any alternative solutions or features you've considered, if any. -->
## Additional context
<!-- Add any other context or screenshots about the feature request here. -->
</issue>
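A minimal sketch of the requested broadcasting, assuming the batch dimension is expanded with `Tensor.expand` before any head reshaping (shapes and names here are illustrative, and this is one possible approach rather than a settled design):

```python
import torch

B, S, E = 4, 16, 32
query = torch.randn(1, S, E)   # broadcastable batch
key = torch.randn(B, S, E)
value = torch.randn(B, S, E)

# expand() views a size-1 batch dim as max_batch without copying; -1 keeps the other dims
max_batch = max(query.shape[0], key.shape[0], value.shape[0])
query, key, value = (t.expand(max_batch, -1, -1) for t in (query, key, value))

assert query.shape == key.shape == value.shape == (B, S, E)
```

Since `expand` returns a view, broadcasting a size-1 batch this way costs no extra memory.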
<code>
[start of xformers/components/multi_head_dispatch.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6
7 from dataclasses import asdict, dataclass
8 from typing import Optional
9
10 import torch
11 import torch.nn as nn
12 from torch.nn.init import constant_
13
14 from xformers.components.attention import Attention
15 from xformers.components.in_proj_container import InProjContainer, InProjParams
16 from xformers.components.positional_embedding import RotaryEmbedding
17
18
19 @dataclass
20 class MultiHeadDispatchConfig:
21 dim_model: int
22 residual_dropout: float
23 num_heads: int
24 attention: Attention
25 bias: bool
26 dim_key: Optional[int]
27 dim_value: Optional[int]
28 in_proj_container: Optional[InProjContainer]
29 use_separate_proj_weight: Optional[bool]
30 use_rotary_embeddings: Optional[bool]
31 out_proj: Optional[nn.Module]
32
33
34 # Move head forward and fold into batch dim. dimensions become (B * nh, S, hs)
35 def _fold_heads(t: torch.Tensor, B: int, S: int, H: int, Hs: int):
36 return t.view(B, S, H, Hs).transpose(1, 2).flatten(start_dim=0, end_dim=1)
37
38
39 def _split_heads(t: torch.Tensor, B: int, S: int, H: int, Hs: int):
40 return t.view(B, S, H, Hs).transpose(1, 2)
41
42
43 class MultiHeadDispatch(nn.Module):
44 """
45 A multi-head masked self-attention dispatch mechanism, with a projection at the end,
46 following the architecture proposed in `Attention is all you need`_, Vaswani et al.
47
48 The actual attention mechanism can vary, as well as the projections.
49 This can be used to wrap the proposed attention mechanisms and make them multi-head aware,
50 but it is optional.
51
52 .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762v5
53 """
54
55 def __init__(
56 self,
57 dim_model: int,
58 residual_dropout: float,
59 num_heads: int,
60 attention: Attention,
61 bias: bool = True,
62 dim_key: Optional[int] = None,
63 dim_value: Optional[int] = None,
64 in_proj_container: Optional[InProjContainer] = None,
65 use_separate_proj_weight: Optional[bool] = False,
66 use_rotary_embeddings: Optional[bool] = False,
67 out_proj: Optional[nn.Module] = None,
68 *args,
69 **kwargs,
70 ):
71 super().__init__()
72
73 assert (
74 dim_model % num_heads == 0
75 ) # static preset for now, each head works on 1/d the embeddings, could be relaxed
76 assert num_heads > 0
77
78 # Popular default is that all latent dimensions are the same
79 dim_key, dim_value = map(lambda x: x if x else dim_model, (dim_key, dim_value))
80
81 self.num_heads = num_heads
82 self.dim_k = dim_key // num_heads
83 self.dim_value = dim_value
84 self.dim_model = dim_model
85 self.attention = attention
86
87 # key, query, value projections for all heads
88 # critical options are
89 # - are we sharing weights ?
90 # - are we adding biases, and if yes are they shared ?
91 if attention.requires_input_projection:
92 self.in_proj_container = (
93 in_proj_container
94 if in_proj_container is not None
95 else InProjContainer(
96 query_proj_params=InProjParams(dim_model, dim_key, bias=bias),
97 key_proj_params=InProjParams(dim_model, dim_key, bias=bias)
98 if use_separate_proj_weight
99 else None,
100 value_proj_params=InProjParams(dim_model, dim_value, bias=bias)
101 if use_separate_proj_weight
102 else None,
103 )
104 )
105
106 # Optional rotary embeddings
107 self.rotary_embeddings = (
108 RotaryEmbedding(self.dim_k) if use_rotary_embeddings else None
109 )
110
111 # Regularization
112 self.resid_drop = nn.Dropout(residual_dropout, inplace=False)
113
114 # Output projection
115 self.proj = out_proj if out_proj else nn.Linear(dim_model, dim_model, bias=bias)
116 if isinstance(self.proj, nn.Linear) and self.proj.bias is not None:
117 constant_(self.proj.bias, 0.0)
118
119 def _check(self, t, name):
120 assert (
121 t.shape[2] % self.dim_k == 0
122 ), f"the {name} embeddings need to be divisible by the number of heads"
123
124 def forward(
125 self,
126 query: torch.Tensor,
127 key: Optional[torch.Tensor] = None,
128 value: Optional[torch.Tensor] = None,
129 att_mask: Optional[torch.Tensor] = None,
130 key_padding_mask: Optional[torch.Tensor] = None,
131 ) -> torch.Tensor:
132 """
133 Expected input dimensions are [batch size, sequence length, embed dim]
134 Output dimensions are [batch size, sequence length, embed dim]
135 """
136
137 if key is None:
138 key = query
139 if value is None:
140 value = query
141
142 # Check the dimensions properly
143 self._check(query, "query")
144 self._check(value, "value")
145 self._check(key, "key")
146
147 B, S_Q, _ = query.size() # Batch x Sequence x Embedding (latent)
148 _, S_K, _ = key.size() # K, Q's sequence length could differ
149
150 # Catch different query and key length but a causal attention
151 if S_Q != S_K:
152 assert (
153 not self.attention.requires_same_k_q_dimensions
154 ), "This attention mechanism requires query and key to have the same sequence (context) lengths"
155
156 if hasattr(self.attention, "causal"):
157 assert not self.attention.causal, (
158 "Causal attention is not supported when key and query have different sequence lengths.\n"
159 + "In that case causality is ill-determined. Please pad your sequences accordingly"
160 )
161
162 if self.attention.requires_skip_multi_head:
163 return self.attention(
164 query, key, value, att_mask=att_mask, key_padding_mask=key_padding_mask
165 )
166
167 # Calculate query, key, values for all heads in batch
168 if self.attention.requires_input_projection:
169 q, k, v = self.in_proj_container(query=query, key=key, value=value)
170 else:
171 k, q, v = key, query, value
172
173 # Optional: rotary embedding, add relative positioning information
174 if self.rotary_embeddings:
175 # rotary requires the head dimension
176 q = _split_heads(q, B, S_Q, self.num_heads, self.dim_k)
177 k = _split_heads(k, B, S_K, self.num_heads, self.dim_k)
178 v = _split_heads(v, B, S_K, self.num_heads, self.dim_k)
179
180 q, k = self.rotary_embeddings(q=q, k=k)
181
182 if not self.attention.requires_head_dimension:
183 q, k, v = q.flatten(0, 1), k.flatten(0, 1), v.flatten(0, 1)
184
185 else:
186 # Reshape k/q/v to either expose the heads, or fold the head dimension into the batch
187 reshape_fn = (
188 _split_heads if self.attention.requires_head_dimension else _fold_heads
189 )
190
191 q = reshape_fn(q, B, S_Q, self.num_heads, self.dim_k)
192 k = reshape_fn(k, B, S_K, self.num_heads, self.dim_k)
193 v = reshape_fn(v, B, S_K, self.num_heads, self.dim_k)
194
195 # Self-attend
196 y = self.attention(
197 q=q, k=k, v=v, att_mask=att_mask, key_padding_mask=key_padding_mask
198 )
199
200 # Re-assemble all head outputs side by side
201 y = (
202 y.view(B, self.num_heads, S_Q, self.dim_k)
203 .transpose(1, 2)
204 .flatten(start_dim=2, end_dim=3)
205 )
206
207 # Output projection, dropout and good to go
208 y = self.resid_drop(self.proj(y))
209
210 # Return the same sequence size as the input
211 return y
212
213 @classmethod
214 def from_config(cls, config: MultiHeadDispatchConfig):
215 # Generate the class inputs from the config
216 fields = asdict(config)
217
218 # Skip all Nones so that default values are used
219 fields = {k: v for k, v in fields.items() if v is not None}
220
221 return cls(**fields)
222
[end of xformers/components/multi_head_dispatch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xformers/components/multi_head_dispatch.py b/xformers/components/multi_head_dispatch.py
--- a/xformers/components/multi_head_dispatch.py
+++ b/xformers/components/multi_head_dispatch.py
@@ -143,6 +143,11 @@
self._check(value, "value")
self._check(key, "key")
+ max_batch = max((query.shape[0], key.shape[0], value.shape[0]))
+ query, key, value = map(
+ lambda x: x.expand(max_batch, -1, -1), [query, key, value]
+ )
+
B, S_Q, _ = query.size() # Batch x Sequence x Embedding (latent)
_, S_K, _ = key.size() # K, Q's sequence length could differ
| {"golden_diff": "diff --git a/xformers/components/multi_head_dispatch.py b/xformers/components/multi_head_dispatch.py\n--- a/xformers/components/multi_head_dispatch.py\n+++ b/xformers/components/multi_head_dispatch.py\n@@ -143,6 +143,11 @@\n self._check(value, \"value\")\n self._check(key, \"key\")\n \n+ max_batch = max((query.shape[0], key.shape[0], value.shape[0]))\n+ query, key, value = map(\n+ lambda x: x.expand(max_batch, -1, -1), [query, key, value]\n+ )\n+\n B, S_Q, _ = query.size() # Batch x Sequence x Embedding (latent)\n _, S_K, _ = key.size() # K, Q's sequence length could differ\n", "issue": "[feat] Make the batch dimension broadcastable\n# \ud83d\ude80 Feature\r\nSupport different key/query/value batch sizes, if 1 or the same value\r\n\r\n## Motivation\r\n(popped up when working on [salina](https://github.com/facebookresearch/salina)) PyTorch MHA handles key/query/value with 1 to B batch sizes, will broadcast if in need, We don\u00b4t do that and hard crash instead, so the users have to align dimensions from the outside\r\n\r\n## Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n## Alternatives\r\n\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, if any. -->\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nfrom dataclasses import asdict, dataclass\nfrom typing import Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.init import constant_\n\nfrom xformers.components.attention import Attention\nfrom xformers.components.in_proj_container import InProjContainer, InProjParams\nfrom xformers.components.positional_embedding import RotaryEmbedding\n\n\n@dataclass\nclass MultiHeadDispatchConfig:\n dim_model: int\n residual_dropout: float\n num_heads: int\n attention: Attention\n bias: bool\n dim_key: Optional[int]\n dim_value: Optional[int]\n in_proj_container: Optional[InProjContainer]\n use_separate_proj_weight: Optional[bool]\n use_rotary_embeddings: Optional[bool]\n out_proj: Optional[nn.Module]\n\n\n# Move head forward and fold into batch dim. dimensions become (B * nh, S, hs)\ndef _fold_heads(t: torch.Tensor, B: int, S: int, H: int, Hs: int):\n return t.view(B, S, H, Hs).transpose(1, 2).flatten(start_dim=0, end_dim=1)\n\n\ndef _split_heads(t: torch.Tensor, B: int, S: int, H: int, Hs: int):\n return t.view(B, S, H, Hs).transpose(1, 2)\n\n\nclass MultiHeadDispatch(nn.Module):\n \"\"\"\n A multi-head masked self-attention dispatch mechanism, with a projection at the end,\n following the architecture proposed in `Attention is all you need`_, Vaswani et al.\n\n The actual attention mechanism can vary, as well as the projections.\n This can be used to wrap the proposed attention mechanisms and make them multi-head aware,\n but it is optional.\n\n .. 
_`Attention is all you need`: https://arxiv.org/abs/1706.03762v5\n \"\"\"\n\n def __init__(\n self,\n dim_model: int,\n residual_dropout: float,\n num_heads: int,\n attention: Attention,\n bias: bool = True,\n dim_key: Optional[int] = None,\n dim_value: Optional[int] = None,\n in_proj_container: Optional[InProjContainer] = None,\n use_separate_proj_weight: Optional[bool] = False,\n use_rotary_embeddings: Optional[bool] = False,\n out_proj: Optional[nn.Module] = None,\n *args,\n **kwargs,\n ):\n super().__init__()\n\n assert (\n dim_model % num_heads == 0\n ) # static preset for now, each head works on 1/d the embeddings, could be relaxed\n assert num_heads > 0\n\n # Popular default is that all latent dimensions are the same\n dim_key, dim_value = map(lambda x: x if x else dim_model, (dim_key, dim_value))\n\n self.num_heads = num_heads\n self.dim_k = dim_key // num_heads\n self.dim_value = dim_value\n self.dim_model = dim_model\n self.attention = attention\n\n # key, query, value projections for all heads\n # critical options are\n # - are we sharing weights ?\n # - are we adding biases, and if yes are they shared ?\n if attention.requires_input_projection:\n self.in_proj_container = (\n in_proj_container\n if in_proj_container is not None\n else InProjContainer(\n query_proj_params=InProjParams(dim_model, dim_key, bias=bias),\n key_proj_params=InProjParams(dim_model, dim_key, bias=bias)\n if use_separate_proj_weight\n else None,\n value_proj_params=InProjParams(dim_model, dim_value, bias=bias)\n if use_separate_proj_weight\n else None,\n )\n )\n\n # Optional rotary embeddings\n self.rotary_embeddings = (\n RotaryEmbedding(self.dim_k) if use_rotary_embeddings else None\n )\n\n # Regularization\n self.resid_drop = nn.Dropout(residual_dropout, inplace=False)\n\n # Output projection\n self.proj = out_proj if out_proj else nn.Linear(dim_model, dim_model, bias=bias)\n if isinstance(self.proj, nn.Linear) and self.proj.bias is not None:\n constant_(self.proj.bias, 0.0)\n\n def _check(self, t, name):\n assert (\n t.shape[2] % self.dim_k == 0\n ), f\"the {name} embeddings need to be divisible by the number of heads\"\n\n def forward(\n self,\n query: torch.Tensor,\n key: Optional[torch.Tensor] = None,\n value: Optional[torch.Tensor] = None,\n att_mask: Optional[torch.Tensor] = None,\n key_padding_mask: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n \"\"\"\n Expected input dimensions are [batch size, sequence length, embed dim]\n Output dimensions are [batch size, sequence length, embed dim]\n \"\"\"\n\n if key is None:\n key = query\n if value is None:\n value = query\n\n # Check the dimensions properly\n self._check(query, \"query\")\n self._check(value, \"value\")\n self._check(key, \"key\")\n\n B, S_Q, _ = query.size() # Batch x Sequence x Embedding (latent)\n _, S_K, _ = key.size() # K, Q's sequence length could differ\n\n # Catch different query and key length but a causal attention\n if S_Q != S_K:\n assert (\n not self.attention.requires_same_k_q_dimensions\n ), \"This attention mechanism requires query and key to have the same sequence (context) lengths\"\n\n if hasattr(self.attention, \"causal\"):\n assert not self.attention.causal, (\n \"Causal attention is not supported when key and query have different sequence lengths.\\n\"\n + \"In that case causality is ill-determined. 
Please pad your sequences accordingly\"\n )\n\n if self.attention.requires_skip_multi_head:\n return self.attention(\n query, key, value, att_mask=att_mask, key_padding_mask=key_padding_mask\n )\n\n # Calculate query, key, values for all heads in batch\n if self.attention.requires_input_projection:\n q, k, v = self.in_proj_container(query=query, key=key, value=value)\n else:\n k, q, v = key, query, value\n\n # Optional: rotary embedding, add relative positioning information\n if self.rotary_embeddings:\n # rotary requires the head dimension\n q = _split_heads(q, B, S_Q, self.num_heads, self.dim_k)\n k = _split_heads(k, B, S_K, self.num_heads, self.dim_k)\n v = _split_heads(v, B, S_K, self.num_heads, self.dim_k)\n\n q, k = self.rotary_embeddings(q=q, k=k)\n\n if not self.attention.requires_head_dimension:\n q, k, v = q.flatten(0, 1), k.flatten(0, 1), v.flatten(0, 1)\n\n else:\n # Reshape k/q/v to either expose the heads, or fold the head dimension into the batch\n reshape_fn = (\n _split_heads if self.attention.requires_head_dimension else _fold_heads\n )\n\n q = reshape_fn(q, B, S_Q, self.num_heads, self.dim_k)\n k = reshape_fn(k, B, S_K, self.num_heads, self.dim_k)\n v = reshape_fn(v, B, S_K, self.num_heads, self.dim_k)\n\n # Self-attend\n y = self.attention(\n q=q, k=k, v=v, att_mask=att_mask, key_padding_mask=key_padding_mask\n )\n\n # Re-assemble all head outputs side by side\n y = (\n y.view(B, self.num_heads, S_Q, self.dim_k)\n .transpose(1, 2)\n .flatten(start_dim=2, end_dim=3)\n )\n\n # Output projection, dropout and good to go\n y = self.resid_drop(self.proj(y))\n\n # Return the same sequence size as the input\n return y\n\n @classmethod\n def from_config(cls, config: MultiHeadDispatchConfig):\n # Generate the class inputs from the config\n fields = asdict(config)\n\n # Skip all Nones so that default values are used\n fields = {k: v for k, v in fields.items() if v is not None}\n\n return cls(**fields)\n", "path": "xformers/components/multi_head_dispatch.py"}]} | 3,185 | 184 |
gh_patches_debug_8562 | rasdani/github-patches | git_diff | DataDog__dd-agent-1443 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[zookeeper] bytes_outstanding metric actually reports the number of in-flight requests on the zk node
A user looking through the ZooKeeper check noticed that the bytes_outstanding metric the DD agent is scraping is not a byte count, but rather the number of in-flight requests currently being serviced by the ZK node.
https://github.com/apache/zookeeper/blob/ca480231e1bdcbe6beda9142766fc3caa2031cd8/src/java/main/org/apache/zookeeper/server/ServerStats.java#L104
https://github.com/apache/zookeeper/blob/ca480231e1bdcbe6beda9142766fc3caa2031cd8/src/java/main/org/apache/zookeeper/server/ZooKeeperServer.java#L740
</issue>
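Given that, the `Outstanding:` value parsed by the check below is better reported as a request count; a small sketch of what that parse could look like (the metric name `zookeeper.outstanding_requests` is a suggestion, not necessarily what the agent ends up shipping):

```python
def parse_outstanding(line):
    """Parse an 'Outstanding: N' line from `stat` output.

    N is the number of in-flight requests being serviced, not a byte count.
    """
    _, value = line.split(':')
    # suggested request-oriented name instead of zookeeper.bytes_outstanding
    return ('zookeeper.outstanding_requests', int(value.strip()))
```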
<code>
[start of checks.d/zk.py]
1 '''
2 Parses the response from zookeeper's `stat` admin command, which looks like:
3
4 ```
5 Zookeeper version: 3.2.2--1, built on 03/16/2010 07:31 GMT
6 Clients:
7 /10.42.114.160:32634[1](queued=0,recved=12,sent=0)
8 /10.37.137.74:21873[1](queued=0,recved=53613,sent=0)
9 /10.37.137.74:21876[1](queued=0,recved=57436,sent=0)
10 /10.115.77.32:32990[1](queued=0,recved=16,sent=0)
11 /10.37.137.74:21891[1](queued=0,recved=55011,sent=0)
12 /10.37.137.74:21797[1](queued=0,recved=19431,sent=0)
13
14 Latency min/avg/max: -10/0/20007
15 Received: 101032173
16 Sent: 0
17 Outstanding: 0
18 Zxid: 0x1034799c7
19 Mode: leader
20 Node count: 487
21 ```
22
23 Tested with Zookeeper versions 3.0.0 to 3.4.5
24
25 '''
26 # stdlib
27 import re
28 import socket
29 import struct
30 from StringIO import StringIO
31
32 # project
33 from checks import AgentCheck
34
35 class ZKConnectionFailure(Exception):
36 """ Raised when we are unable to connect or get the output of a command. """
37 pass
38
39
40 class ZookeeperCheck(AgentCheck):
41 version_pattern = re.compile(r'Zookeeper version: ([^.]+)\.([^.]+)\.([^-]+)', flags=re.I)
42
43 SOURCE_TYPE_NAME = 'zookeeper'
44
45 def check(self, instance):
46 host = instance.get('host', 'localhost')
47 port = int(instance.get('port', 2181))
48 timeout = float(instance.get('timeout', 3.0))
49 expected_mode = (instance.get('expected_mode') or '').strip()
50 tags = instance.get('tags', [])
51 cx_args = (host, port, timeout)
52
53 # Send a service check based on the `ruok` response.
54 try:
55 ruok_out = self._send_command('ruok', *cx_args)
56 except ZKConnectionFailure:
57 # The server should not respond at all if it's not OK.
58 status = AgentCheck.CRITICAL
59 message = 'No response from `ruok` command'
60 self.increment('zookeeper.timeouts')
61 else:
62 ruok_out.seek(0)
63 ruok = ruok_out.readline()
64 if ruok == 'imok':
65 status = AgentCheck.OK
66 else:
67 status = AgentCheck.WARNING
68 message = u'Response from the server: %s' % ruok
69 self.service_check('zookeeper.ruok', status, message=message)
70
71 # Read metrics from the `stat` output.
72 try:
73 stat_out = self._send_command('stat', *cx_args)
74 except ZKConnectionFailure:
75 self.increment('zookeeper.timeouts')
76 else:
77 # Parse the response
78 metrics, new_tags, mode = self.parse_stat(stat_out)
79
80 # Write the data
81 for metric, value in metrics:
82 self.gauge(metric, value, tags=tags + new_tags)
83
84 if expected_mode:
85 if mode == expected_mode:
86 status = AgentCheck.OK
87 message = u"Server is in %s mode" % mode
88 else:
89 status = AgentCheck.CRITICAL
90 message = u"Server is in %s mode but check expects %s mode" % (expected_mode, mode)
91 self.service_check('zookeeper.mode', status, message=message)
92
93 def _send_command(self, command, host, port, timeout):
94 sock = socket.socket()
95 sock.settimeout(timeout)
96 buf = StringIO()
97 chunk_size = 1024
98 # try-finally and try-except to stay compatible with python 2.4
99 try:
100 try:
101 # Connect to the zk client port and send the stat command
102 sock.connect((host, port))
103 sock.sendall(command)
104
105 # Read the response into a StringIO buffer
106 chunk = sock.recv(chunk_size)
107 buf.write(chunk)
108 num_reads = 1
109 max_reads = 10000
110 while chunk:
111 if num_reads > max_reads:
112 # Safeguard against an infinite loop
113 raise Exception("Read %s bytes before exceeding max reads of %s. " % (buf.tell(), max_reads))
114 chunk = sock.recv(chunk_size)
115 buf.write(chunk)
116 num_reads += 1
117 except (socket.timeout, socket.error):
118 raise ZKConnectionFailure()
119 finally:
120 sock.close()
121 return buf
122
123 def parse_stat(self, buf):
124 ''' `buf` is a readable file-like object
125 returns a tuple: ([(metric_name, value)], tags)
126 '''
127 metrics = []
128 buf.seek(0)
129
130 # Check the version line to make sure we parse the rest of the
131 # body correctly. Particularly, the Connections val was added in
132 # >= 3.4.4.
133 start_line = buf.readline()
134 match = self.version_pattern.match(start_line)
135 if match is None:
136 raise Exception("Could not parse version from stat command output: %s" % start_line)
137 else:
138 version_tuple = match.groups()
139 has_connections_val = version_tuple >= ('3', '4', '4')
140
141 # Clients:
142 buf.readline() # skip the Clients: header
143 connections = 0
144 client_line = buf.readline().strip()
145 if client_line:
146 connections += 1
147 while client_line:
148 client_line = buf.readline().strip()
149 if client_line:
150 connections += 1
151
152 # Latency min/avg/max: -10/0/20007
153 _, value = buf.readline().split(':')
154 l_min, l_avg, l_max = [int(v) for v in value.strip().split('/')]
155 metrics.append(('zookeeper.latency.min', l_min))
156 metrics.append(('zookeeper.latency.avg', l_avg))
157 metrics.append(('zookeeper.latency.max', l_max))
158
159 # Received: 101032173
160 _, value = buf.readline().split(':')
161 metrics.append(('zookeeper.bytes_received', long(value.strip())))
162
163 # Sent: 1324
164 _, value = buf.readline().split(':')
165 metrics.append(('zookeeper.bytes_sent', long(value.strip())))
166
167 if has_connections_val:
168 # Connections: 1
169 _, value = buf.readline().split(':')
170 metrics.append(('zookeeper.connections', int(value.strip())))
171 else:
172 # If the zk version doesnt explicitly give the Connections val,
173 # use the value we computed from the client list.
174 metrics.append(('zookeeper.connections', connections))
175
176 # Outstanding: 0
177 _, value = buf.readline().split(':')
178 metrics.append(('zookeeper.bytes_outstanding', long(value.strip())))
179
180 # Zxid: 0x1034799c7
181 _, value = buf.readline().split(':')
182 # Parse as a 64 bit hex int
183 zxid = long(value.strip(), 16)
184 # convert to bytes
185 zxid_bytes = struct.pack('>q', zxid)
186 # the higher order 4 bytes is the epoch
187 (zxid_epoch,) = struct.unpack('>i', zxid_bytes[0:4])
188 # the lower order 4 bytes is the count
189 (zxid_count,) = struct.unpack('>i', zxid_bytes[4:8])
190
191 metrics.append(('zookeeper.zxid.epoch', zxid_epoch))
192 metrics.append(('zookeeper.zxid.count', zxid_count))
193
194 # Mode: leader
195 _, value = buf.readline().split(':')
196 mode = value.strip().lower()
197 tags = [u'mode:' + mode]
198
199 # Node count: 487
200 _, value = buf.readline().split(':')
201 metrics.append(('zookeeper.nodes', long(value.strip())))
202
203 return metrics, tags, mode
204
[end of checks.d/zk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checks.d/zk.py b/checks.d/zk.py
--- a/checks.d/zk.py
+++ b/checks.d/zk.py
@@ -175,7 +175,10 @@
# Outstanding: 0
_, value = buf.readline().split(':')
+ # Fixme: This metric name is wrong. It should be removed in a major version of the agent
+ # See https://github.com/DataDog/dd-agent/issues/1383
metrics.append(('zookeeper.bytes_outstanding', long(value.strip())))
+ metrics.append(('zookeeper.outstanding_requests', long(value.strip())))
# Zxid: 0x1034799c7
_, value = buf.readline().split(':')
| {"golden_diff": "diff --git a/checks.d/zk.py b/checks.d/zk.py\n--- a/checks.d/zk.py\n+++ b/checks.d/zk.py\n@@ -175,7 +175,10 @@\n \n # Outstanding: 0\n _, value = buf.readline().split(':')\n+ # Fixme: This metric name is wrong. It should be removed in a major version of the agent\n+ # See https://github.com/DataDog/dd-agent/issues/1383\n metrics.append(('zookeeper.bytes_outstanding', long(value.strip())))\n+ metrics.append(('zookeeper.outstanding_requests', long(value.strip())))\n \n # Zxid: 0x1034799c7\n _, value = buf.readline().split(':')\n", "issue": "[zookeeper] bytes_outstanding metric returns number of requests served by zk node\nUser checking through the ZooKeeper check noticed that the bytes_outsanding metric the DD agent is scrapping is not bytes, but rather the number of inflight requests currently being serviced by the ZK node.\n\nhttps://github.com/apache/zookeeper/blob/ca480231e1bdcbe6beda9142766fc3caa2031cd8/src/java/main/org/apache/zookeeper/server/ServerStats.java#L104\n\nhttps://github.com/apache/zookeeper/blob/ca480231e1bdcbe6beda9142766fc3caa2031cd8/src/java/main/org/apache/zookeeper/server/ZooKeeperServer.java#L740\n\n", "before_files": [{"content": "'''\nParses the response from zookeeper's `stat` admin command, which looks like:\n\n```\nZookeeper version: 3.2.2--1, built on 03/16/2010 07:31 GMT\nClients:\n /10.42.114.160:32634[1](queued=0,recved=12,sent=0)\n /10.37.137.74:21873[1](queued=0,recved=53613,sent=0)\n /10.37.137.74:21876[1](queued=0,recved=57436,sent=0)\n /10.115.77.32:32990[1](queued=0,recved=16,sent=0)\n /10.37.137.74:21891[1](queued=0,recved=55011,sent=0)\n /10.37.137.74:21797[1](queued=0,recved=19431,sent=0)\n\nLatency min/avg/max: -10/0/20007\nReceived: 101032173\nSent: 0\nOutstanding: 0\nZxid: 0x1034799c7\nMode: leader\nNode count: 487\n```\n\nTested with Zookeeper versions 3.0.0 to 3.4.5\n\n'''\n# stdlib\nimport re\nimport socket\nimport struct\nfrom StringIO import StringIO\n\n# project\nfrom checks import AgentCheck\n\nclass ZKConnectionFailure(Exception):\n \"\"\" Raised when we are unable to connect or get the output of a command. 
\"\"\"\n pass\n\n\nclass ZookeeperCheck(AgentCheck):\n version_pattern = re.compile(r'Zookeeper version: ([^.]+)\\.([^.]+)\\.([^-]+)', flags=re.I)\n\n SOURCE_TYPE_NAME = 'zookeeper'\n\n def check(self, instance):\n host = instance.get('host', 'localhost')\n port = int(instance.get('port', 2181))\n timeout = float(instance.get('timeout', 3.0))\n expected_mode = (instance.get('expected_mode') or '').strip()\n tags = instance.get('tags', [])\n cx_args = (host, port, timeout)\n\n # Send a service check based on the `ruok` response.\n try:\n ruok_out = self._send_command('ruok', *cx_args)\n except ZKConnectionFailure:\n # The server should not respond at all if it's not OK.\n status = AgentCheck.CRITICAL\n message = 'No response from `ruok` command'\n self.increment('zookeeper.timeouts')\n else:\n ruok_out.seek(0)\n ruok = ruok_out.readline()\n if ruok == 'imok':\n status = AgentCheck.OK\n else:\n status = AgentCheck.WARNING\n message = u'Response from the server: %s' % ruok\n self.service_check('zookeeper.ruok', status, message=message)\n\n # Read metrics from the `stat` output.\n try:\n stat_out = self._send_command('stat', *cx_args)\n except ZKConnectionFailure:\n self.increment('zookeeper.timeouts')\n else:\n # Parse the response\n metrics, new_tags, mode = self.parse_stat(stat_out)\n\n # Write the data\n for metric, value in metrics:\n self.gauge(metric, value, tags=tags + new_tags)\n\n if expected_mode:\n if mode == expected_mode:\n status = AgentCheck.OK\n message = u\"Server is in %s mode\" % mode\n else:\n status = AgentCheck.CRITICAL\n message = u\"Server is in %s mode but check expects %s mode\" % (expected_mode, mode)\n self.service_check('zookeeper.mode', status, message=message)\n\n def _send_command(self, command, host, port, timeout):\n sock = socket.socket()\n sock.settimeout(timeout)\n buf = StringIO()\n chunk_size = 1024\n # try-finally and try-except to stay compatible with python 2.4\n try:\n try:\n # Connect to the zk client port and send the stat command\n sock.connect((host, port))\n sock.sendall(command)\n\n # Read the response into a StringIO buffer\n chunk = sock.recv(chunk_size)\n buf.write(chunk)\n num_reads = 1\n max_reads = 10000\n while chunk:\n if num_reads > max_reads:\n # Safeguard against an infinite loop\n raise Exception(\"Read %s bytes before exceeding max reads of %s. \" % (buf.tell(), max_reads))\n chunk = sock.recv(chunk_size)\n buf.write(chunk)\n num_reads += 1\n except (socket.timeout, socket.error):\n raise ZKConnectionFailure()\n finally:\n sock.close()\n return buf\n\n def parse_stat(self, buf):\n ''' `buf` is a readable file-like object\n returns a tuple: ([(metric_name, value)], tags)\n '''\n metrics = []\n buf.seek(0)\n\n # Check the version line to make sure we parse the rest of the\n # body correctly. 
Particularly, the Connections val was added in\n # >= 3.4.4.\n start_line = buf.readline()\n match = self.version_pattern.match(start_line)\n if match is None:\n raise Exception(\"Could not parse version from stat command output: %s\" % start_line)\n else:\n version_tuple = match.groups()\n has_connections_val = version_tuple >= ('3', '4', '4')\n\n # Clients:\n buf.readline() # skip the Clients: header\n connections = 0\n client_line = buf.readline().strip()\n if client_line:\n connections += 1\n while client_line:\n client_line = buf.readline().strip()\n if client_line:\n connections += 1\n\n # Latency min/avg/max: -10/0/20007\n _, value = buf.readline().split(':')\n l_min, l_avg, l_max = [int(v) for v in value.strip().split('/')]\n metrics.append(('zookeeper.latency.min', l_min))\n metrics.append(('zookeeper.latency.avg', l_avg))\n metrics.append(('zookeeper.latency.max', l_max))\n\n # Received: 101032173\n _, value = buf.readline().split(':')\n metrics.append(('zookeeper.bytes_received', long(value.strip())))\n\n # Sent: 1324\n _, value = buf.readline().split(':')\n metrics.append(('zookeeper.bytes_sent', long(value.strip())))\n\n if has_connections_val:\n # Connections: 1\n _, value = buf.readline().split(':')\n metrics.append(('zookeeper.connections', int(value.strip())))\n else:\n # If the zk version doesnt explicitly give the Connections val,\n # use the value we computed from the client list.\n metrics.append(('zookeeper.connections', connections))\n\n # Outstanding: 0\n _, value = buf.readline().split(':')\n metrics.append(('zookeeper.bytes_outstanding', long(value.strip())))\n\n # Zxid: 0x1034799c7\n _, value = buf.readline().split(':')\n # Parse as a 64 bit hex int\n zxid = long(value.strip(), 16)\n # convert to bytes\n zxid_bytes = struct.pack('>q', zxid)\n # the higher order 4 bytes is the epoch\n (zxid_epoch,) = struct.unpack('>i', zxid_bytes[0:4])\n # the lower order 4 bytes is the count\n (zxid_count,) = struct.unpack('>i', zxid_bytes[4:8])\n\n metrics.append(('zookeeper.zxid.epoch', zxid_epoch))\n metrics.append(('zookeeper.zxid.count', zxid_count))\n\n # Mode: leader\n _, value = buf.readline().split(':')\n mode = value.strip().lower()\n tags = [u'mode:' + mode]\n\n # Node count: 487\n _, value = buf.readline().split(':')\n metrics.append(('zookeeper.nodes', long(value.strip())))\n\n return metrics, tags, mode\n", "path": "checks.d/zk.py"}]} | 3,114 | 171 |
gh_patches_debug_39556 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-6474 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Genius.com now throws unsupported url
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting that a **supported** site is broken
- [X] I've verified that I'm running yt-dlp version **2023.03.04** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
Colombia
### Provide a description that is worded well enough to be understood
The genius extractor currently doesn't work
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [X] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['-vU', 'https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens']
[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] [d4e6ef407] (win_exe)
[debug] Python 3.8.10 (CPython AMD64 64bit) - Windows-10-10.0.19045-SP0 (OpenSSL 1.1.1k 25 Mar 2021)
[debug] exe versions: ffmpeg N-109920-gac6eec1fc2-20230224 (setts), ffprobe N-109920-gac6eec1fc2-20230224
[debug] Optional libraries: Cryptodome-3.17, brotli-1.0.9, certifi-2022.12.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-10.4
[debug] Proxy map: {}
[debug] Loaded 1788 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Available version: [email protected], Current version: [email protected]
Current Build Hash: 7c51ebf2a7a9e1f199547cd8dad7df7713655f1c91a95f7e49764a513c52d6b1
yt-dlp is up to date ([email protected])
[generic] Extracting URL: https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens
[generic] cordae-anderson-paak-break-down-the-meaning-of-two-tens: Downloading webpage
WARNING: [generic] Falling back on generic information extractor
[generic] cordae-anderson-paak-break-down-the-meaning-of-two-tens: Extracting information
[debug] Looking for embeds
ERROR: Unsupported URL: https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens
Traceback (most recent call last):
File "yt_dlp\YoutubeDL.py", line 1518, in wrapper
File "yt_dlp\YoutubeDL.py", line 1594, in __extract_info
File "yt_dlp\extractor\common.py", line 694, in extract
File "yt_dlp\extractor\generic.py", line 2510, in _real_extract
yt_dlp.utils.UnsupportedError: Unsupported URL: https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens
```
</issue>
<code>
[start of yt_dlp/extractor/genius.py]
1 from .common import InfoExtractor
2 from ..utils import (
3 ExtractorError,
4 js_to_json,
5 smuggle_url,
6 str_or_none,
7 traverse_obj,
8 unescapeHTML,
9 )
10
11
12 class GeniusIE(InfoExtractor):
13 _VALID_URL = r'https?://(?:www\.)?genius\.com/videos/(?P<id>[^?/#]+)'
14 _TESTS = [{
15 'url': 'https://genius.com/videos/Vince-staples-breaks-down-the-meaning-of-when-sparks-fly',
16 'md5': '64c2ad98cfafcfda23bfa0ad0c512f4c',
17 'info_dict': {
18 'id': '6313303597112',
19 'ext': 'mp4',
20 'title': 'Vince Staples Breaks Down The Meaning Of “When Sparks Fly”',
21 'description': 'md5:bc15e00342c537c0039d414423ae5752',
22 'tags': 'count:1',
23 'uploader_id': '4863540648001',
24 'duration': 388.416,
25 'upload_date': '20221005',
26 'timestamp': 1664982341,
27 'thumbnail': r're:^https?://.*\.jpg$',
28 },
29 }, {
30 'url': 'https://genius.com/videos/Breaking-down-drakes-certified-lover-boy-kanye-beef-way-2-sexy-cudi',
31 'md5': 'b8ed87a5efd1473bd027c20a969d4060',
32 'info_dict': {
33 'id': '6271792014001',
34 'ext': 'mp4',
35 'title': 'md5:c6355f7fa8a70bc86492a3963919fc15',
36 'description': 'md5:1774638c31548b31b037c09e9b821393',
37 'tags': 'count:3',
38 'uploader_id': '4863540648001',
39 'duration': 2685.099,
40 'upload_date': '20210909',
41 'timestamp': 1631209167,
42 'thumbnail': r're:^https?://.*\.jpg$',
43 },
44 }]
45
46 def _real_extract(self, url):
47 display_id = self._match_id(url)
48 webpage = self._download_webpage(url, display_id)
49
50 metadata = self._search_json(
51 r'<meta content="', webpage, 'metadata', display_id, transform_source=unescapeHTML)
52 video_id = traverse_obj(
53 metadata, ('video', 'provider_id'),
54 ('dfp_kv', lambda _, x: x['name'] == 'brightcove_video_id', 'values', 0), get_all=False)
55 if not video_id:
56 raise ExtractorError('Brightcove video id not found in webpage')
57
58 config = self._search_json(r'var\s*APP_CONFIG\s*=', webpage, 'config', video_id, default={})
59 account_id = config.get('brightcove_account_id', '4863540648001')
60 player_id = traverse_obj(
61 config, 'brightcove_standard_web_player_id', 'brightcove_standard_no_autoplay_web_player_id',
62 'brightcove_modal_web_player_id', 'brightcove_song_story_web_player_id', default='S1ZcmcOC1x')
63
64 return self.url_result(
65 smuggle_url(
66 f'https://players.brightcove.net/{account_id}/{player_id}_default/index.html?videoId={video_id}',
67 {'referrer': url}), 'BrightcoveNew', video_id)
68
69
70 class GeniusLyricsIE(InfoExtractor):
71 _VALID_URL = r'https?://(?:www\.)?genius\.com/(?P<id>[^?/#]+)-lyrics[?/#]?'
72 _TESTS = [{
73 'url': 'https://genius.com/Lil-baby-heyy-lyrics',
74 'playlist_mincount': 2,
75 'info_dict': {
76 'id': '8454545',
77 'title': 'Heyy',
78 'description': 'Heyy by Lil Baby',
79 },
80 }, {
81 'url': 'https://genius.com/Outkast-two-dope-boyz-in-a-cadillac-lyrics',
82 'playlist_mincount': 1,
83 'info_dict': {
84 'id': '36239',
85 'title': 'Two Dope Boyz (In a Cadillac)',
86 'description': 'Two Dope Boyz (In a Cadillac) by OutKast',
87 },
88 }, {
89 'url': 'https://genius.com/Playboi-carti-rip-lyrics',
90 'playlist_mincount': 1,
91 'info_dict': {
92 'id': '3710582',
93 'title': 'R.I.P.',
94 'description': 'R.I.P. by Playboi Carti',
95 },
96 }]
97
98 def _real_extract(self, url):
99 display_id = self._match_id(url)
100 webpage = self._download_webpage(url, display_id)
101
102 json_string = self._search_json(
103 r'window\.__PRELOADED_STATE__\s*=\s*JSON\.parse\(', webpage, 'json string',
104 display_id, transform_source=js_to_json, contains_pattern=r'\'{(?s:.+)}\'')
105 song_info = self._parse_json(json_string, display_id)
106 song_id = str_or_none(traverse_obj(song_info, ('songPage', 'song')))
107 if not song_id:
108 raise ExtractorError('Song id not found in webpage')
109
110 title = traverse_obj(
111 song_info, ('songPage', 'trackingData', lambda _, x: x['key'] == 'Title', 'value'),
112 get_all=False, default='untitled')
113 artist = traverse_obj(
114 song_info, ('songPage', 'trackingData', lambda _, x: x['key'] == 'Primary Artist', 'value'),
115 get_all=False, default='unknown artist')
116 media = traverse_obj(
117 song_info, ('entities', 'songs', song_id, 'media'), expected_type=list, default=[])
118
119 entries = []
120 for m in media:
121 if m.get('type') in ('video', 'audio') and m.get('url'):
122 if m.get('provider') == 'spotify':
123 self.to_screen(f'{song_id}: Skipping Spotify audio embed')
124 else:
125 entries.append(self.url_result(m['url']))
126
127 return self.playlist_result(entries, song_id, title, f'{title} by {artist}')
128
[end of yt_dlp/extractor/genius.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/yt_dlp/extractor/genius.py b/yt_dlp/extractor/genius.py
--- a/yt_dlp/extractor/genius.py
+++ b/yt_dlp/extractor/genius.py
@@ -10,7 +10,7 @@
class GeniusIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?genius\.com/videos/(?P<id>[^?/#]+)'
+ _VALID_URL = r'https?://(?:www\.)?genius\.com/(?:videos|(?P<article>a))/(?P<id>[^?/#]+)'
_TESTS = [{
'url': 'https://genius.com/videos/Vince-staples-breaks-down-the-meaning-of-when-sparks-fly',
'md5': '64c2ad98cfafcfda23bfa0ad0c512f4c',
@@ -41,19 +41,37 @@
'timestamp': 1631209167,
'thumbnail': r're:^https?://.*\.jpg$',
},
+ }, {
+ 'url': 'https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens',
+ 'md5': 'f98a4e03b16b0a2821bd6e52fb3cc9d7',
+ 'info_dict': {
+ 'id': '6321509903112',
+ 'ext': 'mp4',
+ 'title': 'Cordae & Anderson .Paak Breaks Down The Meaning Of “Two Tens”',
+ 'description': 'md5:1255f0e1161d07342ce56a8464ac339d',
+ 'tags': ['song id: 5457554'],
+ 'uploader_id': '4863540648001',
+ 'duration': 361.813,
+ 'upload_date': '20230301',
+ 'timestamp': 1677703908,
+ 'thumbnail': r're:^https?://.*\.jpg$',
+ },
}]
def _real_extract(self, url):
- display_id = self._match_id(url)
+ display_id, is_article = self._match_valid_url(url).group('id', 'article')
webpage = self._download_webpage(url, display_id)
metadata = self._search_json(
- r'<meta content="', webpage, 'metadata', display_id, transform_source=unescapeHTML)
- video_id = traverse_obj(
- metadata, ('video', 'provider_id'),
- ('dfp_kv', lambda _, x: x['name'] == 'brightcove_video_id', 'values', 0), get_all=False)
+ r'<meta content="', webpage, 'metadata', display_id,
+ end_pattern=r'"\s+itemprop="page_data"', transform_source=unescapeHTML)
+ video_id = traverse_obj(metadata, (
+ (('article', 'media', ...), ('video', None)),
+ ('provider_id', ('dfp_kv', lambda _, v: v['name'] == 'brightcove_video_id', 'values', ...))),
+ get_all=False)
if not video_id:
- raise ExtractorError('Brightcove video id not found in webpage')
+ # Not all article pages have videos, expect the error
+ raise ExtractorError('Brightcove video ID not found in webpage', expected=bool(is_article))
config = self._search_json(r'var\s*APP_CONFIG\s*=', webpage, 'config', video_id, default={})
account_id = config.get('brightcove_account_id', '4863540648001')
@@ -68,7 +86,7 @@
class GeniusLyricsIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?genius\.com/(?P<id>[^?/#]+)-lyrics[?/#]?'
+ _VALID_URL = r'https?://(?:www\.)?genius\.com/(?P<id>[^?/#]+)-lyrics(?:[?/#]|$)'
_TESTS = [{
'url': 'https://genius.com/Lil-baby-heyy-lyrics',
'playlist_mincount': 2,
| {"golden_diff": "diff --git a/yt_dlp/extractor/genius.py b/yt_dlp/extractor/genius.py\n--- a/yt_dlp/extractor/genius.py\n+++ b/yt_dlp/extractor/genius.py\n@@ -10,7 +10,7 @@\n \n \n class GeniusIE(InfoExtractor):\n- _VALID_URL = r'https?://(?:www\\.)?genius\\.com/videos/(?P<id>[^?/#]+)'\n+ _VALID_URL = r'https?://(?:www\\.)?genius\\.com/(?:videos|(?P<article>a))/(?P<id>[^?/#]+)'\n _TESTS = [{\n 'url': 'https://genius.com/videos/Vince-staples-breaks-down-the-meaning-of-when-sparks-fly',\n 'md5': '64c2ad98cfafcfda23bfa0ad0c512f4c',\n@@ -41,19 +41,37 @@\n 'timestamp': 1631209167,\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n+ }, {\n+ 'url': 'https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens',\n+ 'md5': 'f98a4e03b16b0a2821bd6e52fb3cc9d7',\n+ 'info_dict': {\n+ 'id': '6321509903112',\n+ 'ext': 'mp4',\n+ 'title': 'Cordae & Anderson .Paak Breaks Down The Meaning Of \u201cTwo Tens\u201d',\n+ 'description': 'md5:1255f0e1161d07342ce56a8464ac339d',\n+ 'tags': ['song id: 5457554'],\n+ 'uploader_id': '4863540648001',\n+ 'duration': 361.813,\n+ 'upload_date': '20230301',\n+ 'timestamp': 1677703908,\n+ 'thumbnail': r're:^https?://.*\\.jpg$',\n+ },\n }]\n \n def _real_extract(self, url):\n- display_id = self._match_id(url)\n+ display_id, is_article = self._match_valid_url(url).group('id', 'article')\n webpage = self._download_webpage(url, display_id)\n \n metadata = self._search_json(\n- r'<meta content=\"', webpage, 'metadata', display_id, transform_source=unescapeHTML)\n- video_id = traverse_obj(\n- metadata, ('video', 'provider_id'),\n- ('dfp_kv', lambda _, x: x['name'] == 'brightcove_video_id', 'values', 0), get_all=False)\n+ r'<meta content=\"', webpage, 'metadata', display_id,\n+ end_pattern=r'\"\\s+itemprop=\"page_data\"', transform_source=unescapeHTML)\n+ video_id = traverse_obj(metadata, (\n+ (('article', 'media', ...), ('video', None)),\n+ ('provider_id', ('dfp_kv', lambda _, v: v['name'] == 'brightcove_video_id', 'values', ...))),\n+ get_all=False)\n if not video_id:\n- raise ExtractorError('Brightcove video id not found in webpage')\n+ # Not all article pages have videos, expect the error\n+ raise ExtractorError('Brightcove video ID not found in webpage', expected=bool(is_article))\n \n config = self._search_json(r'var\\s*APP_CONFIG\\s*=', webpage, 'config', video_id, default={})\n account_id = config.get('brightcove_account_id', '4863540648001')\n@@ -68,7 +86,7 @@\n \n \n class GeniusLyricsIE(InfoExtractor):\n- _VALID_URL = r'https?://(?:www\\.)?genius\\.com/(?P<id>[^?/#]+)-lyrics[?/#]?'\n+ _VALID_URL = r'https?://(?:www\\.)?genius\\.com/(?P<id>[^?/#]+)-lyrics(?:[?/#]|$)'\n _TESTS = [{\n 'url': 'https://genius.com/Lil-baby-heyy-lyrics',\n 'playlist_mincount': 2,\n", "issue": "Genius.com now throws unsupported url\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm reporting that a **supported** site is broken\n- [X] I've verified that I'm running yt-dlp version **2023.03.04** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or 
escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nColombia\n\n### Provide a description that is worded well enough to be understood\n\nThe genius extractor currently doesn't work\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [X] If using API, add `'verbose': True` to `YoutubeDL` params instead\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\n[debug] Command-line config: ['-vU', 'https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens']\r\n[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version [email protected] [d4e6ef407] (win_exe)\r\n[debug] Python 3.8.10 (CPython AMD64 64bit) - Windows-10-10.0.19045-SP0 (OpenSSL 1.1.1k 25 Mar 2021)\r\n[debug] exe versions: ffmpeg N-109920-gac6eec1fc2-20230224 (setts), ffprobe N-109920-gac6eec1fc2-20230224\r\n[debug] Optional libraries: Cryptodome-3.17, brotli-1.0.9, certifi-2022.12.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-10.4\r\n[debug] Proxy map: {}\r\n[debug] Loaded 1788 extractors\r\n[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest\r\nAvailable version: [email protected], Current version: [email protected]\r\nCurrent Build Hash: 7c51ebf2a7a9e1f199547cd8dad7df7713655f1c91a95f7e49764a513c52d6b1\r\nyt-dlp is up to date ([email protected])\r\n[generic] Extracting URL: https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens\r\n[generic] cordae-anderson-paak-break-down-the-meaning-of-two-tens: Downloading webpage\r\nWARNING: [generic] Falling back on generic information extractor\r\n[generic] cordae-anderson-paak-break-down-the-meaning-of-two-tens: Extracting information\r\n[debug] Looking for embeds\r\nERROR: Unsupported URL: https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens\r\nTraceback (most recent call last):\r\n File \"yt_dlp\\YoutubeDL.py\", line 1518, in wrapper\r\n File \"yt_dlp\\YoutubeDL.py\", line 1594, in __extract_info\r\n File \"yt_dlp\\extractor\\common.py\", line 694, in extract\r\n File \"yt_dlp\\extractor\\generic.py\", line 2510, in _real_extract\r\nyt_dlp.utils.UnsupportedError: Unsupported URL: https://genius.com/a/cordae-anderson-paak-break-down-the-meaning-of-two-tens\n```\n\n", "before_files": [{"content": "from .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n js_to_json,\n smuggle_url,\n str_or_none,\n traverse_obj,\n unescapeHTML,\n)\n\n\nclass GeniusIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?genius\\.com/videos/(?P<id>[^?/#]+)'\n _TESTS = [{\n 'url': 
'https://genius.com/videos/Vince-staples-breaks-down-the-meaning-of-when-sparks-fly',\n 'md5': '64c2ad98cfafcfda23bfa0ad0c512f4c',\n 'info_dict': {\n 'id': '6313303597112',\n 'ext': 'mp4',\n 'title': 'Vince Staples Breaks Down The Meaning Of \u201cWhen Sparks Fly\u201d',\n 'description': 'md5:bc15e00342c537c0039d414423ae5752',\n 'tags': 'count:1',\n 'uploader_id': '4863540648001',\n 'duration': 388.416,\n 'upload_date': '20221005',\n 'timestamp': 1664982341,\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n }, {\n 'url': 'https://genius.com/videos/Breaking-down-drakes-certified-lover-boy-kanye-beef-way-2-sexy-cudi',\n 'md5': 'b8ed87a5efd1473bd027c20a969d4060',\n 'info_dict': {\n 'id': '6271792014001',\n 'ext': 'mp4',\n 'title': 'md5:c6355f7fa8a70bc86492a3963919fc15',\n 'description': 'md5:1774638c31548b31b037c09e9b821393',\n 'tags': 'count:3',\n 'uploader_id': '4863540648001',\n 'duration': 2685.099,\n 'upload_date': '20210909',\n 'timestamp': 1631209167,\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n }]\n\n def _real_extract(self, url):\n display_id = self._match_id(url)\n webpage = self._download_webpage(url, display_id)\n\n metadata = self._search_json(\n r'<meta content=\"', webpage, 'metadata', display_id, transform_source=unescapeHTML)\n video_id = traverse_obj(\n metadata, ('video', 'provider_id'),\n ('dfp_kv', lambda _, x: x['name'] == 'brightcove_video_id', 'values', 0), get_all=False)\n if not video_id:\n raise ExtractorError('Brightcove video id not found in webpage')\n\n config = self._search_json(r'var\\s*APP_CONFIG\\s*=', webpage, 'config', video_id, default={})\n account_id = config.get('brightcove_account_id', '4863540648001')\n player_id = traverse_obj(\n config, 'brightcove_standard_web_player_id', 'brightcove_standard_no_autoplay_web_player_id',\n 'brightcove_modal_web_player_id', 'brightcove_song_story_web_player_id', default='S1ZcmcOC1x')\n\n return self.url_result(\n smuggle_url(\n f'https://players.brightcove.net/{account_id}/{player_id}_default/index.html?videoId={video_id}',\n {'referrer': url}), 'BrightcoveNew', video_id)\n\n\nclass GeniusLyricsIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?genius\\.com/(?P<id>[^?/#]+)-lyrics[?/#]?'\n _TESTS = [{\n 'url': 'https://genius.com/Lil-baby-heyy-lyrics',\n 'playlist_mincount': 2,\n 'info_dict': {\n 'id': '8454545',\n 'title': 'Heyy',\n 'description': 'Heyy by Lil Baby',\n },\n }, {\n 'url': 'https://genius.com/Outkast-two-dope-boyz-in-a-cadillac-lyrics',\n 'playlist_mincount': 1,\n 'info_dict': {\n 'id': '36239',\n 'title': 'Two Dope Boyz (In a Cadillac)',\n 'description': 'Two Dope Boyz (In a Cadillac) by OutKast',\n },\n }, {\n 'url': 'https://genius.com/Playboi-carti-rip-lyrics',\n 'playlist_mincount': 1,\n 'info_dict': {\n 'id': '3710582',\n 'title': 'R.I.P.',\n 'description': 'R.I.P. 
by Playboi Carti',\n },\n }]\n\n def _real_extract(self, url):\n display_id = self._match_id(url)\n webpage = self._download_webpage(url, display_id)\n\n json_string = self._search_json(\n r'window\\.__PRELOADED_STATE__\\s*=\\s*JSON\\.parse\\(', webpage, 'json string',\n display_id, transform_source=js_to_json, contains_pattern=r'\\'{(?s:.+)}\\'')\n song_info = self._parse_json(json_string, display_id)\n song_id = str_or_none(traverse_obj(song_info, ('songPage', 'song')))\n if not song_id:\n raise ExtractorError('Song id not found in webpage')\n\n title = traverse_obj(\n song_info, ('songPage', 'trackingData', lambda _, x: x['key'] == 'Title', 'value'),\n get_all=False, default='untitled')\n artist = traverse_obj(\n song_info, ('songPage', 'trackingData', lambda _, x: x['key'] == 'Primary Artist', 'value'),\n get_all=False, default='unknown artist')\n media = traverse_obj(\n song_info, ('entities', 'songs', song_id, 'media'), expected_type=list, default=[])\n\n entries = []\n for m in media:\n if m.get('type') in ('video', 'audio') and m.get('url'):\n if m.get('provider') == 'spotify':\n self.to_screen(f'{song_id}: Skipping Spotify audio embed')\n else:\n entries.append(self.url_result(m['url']))\n\n return self.playlist_result(entries, song_id, title, f'{title} by {artist}')\n", "path": "yt_dlp/extractor/genius.py"}]} | 3,652 | 1,022 |
gh_patches_debug_4006 | rasdani/github-patches | git_diff | kserve__kserve-818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kubernetes probes using HealthHandler endpoint succeed when models are not ready
/kind bug
**What steps did you take and what happened:**
Used the [HealthHandler endpoint](https://github.com/kubeflow/kfserving/blob/92c5f3c9df662c853beef86bddf7f30ab5d8b996/python/kfserving/kfserving/kfserver.py#L62-L63) to set up a Kubernetes readiness probe. The probe succeeded even though the model was not ready.
**What did you expect to happen:**
Readiness probe failure when model is not ready.
**Anything else you would like to add:**
[Kubernetes probe documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) says `Any code greater than or equal to 200 and less than 400 indicates success. Any other code indicates failure`, which seems to imply that the [response contents](https://github.com/kubeflow/kfserving/blob/92c5f3c9df662c853beef86bddf7f30ab5d8b996/python/kfserving/kfserving/kfserver.py#L108-L111) (which include the model's `.ready` property) are ignored.
**Environment:**
- Istio Version:
- Knative Version:
- KFServing Version:
- Kubeflow version:
- Minikube version:
- Kubernetes version: (use `kubectl version`):
- OS (e.g. from `/etc/os-release`):
</issue>
<code>
[start of python/kfserving/kfserving/kfserver.py]
1 # Copyright 2020 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import argparse
16 import logging
17 import json
18 from typing import List, Dict
19 import tornado.ioloop
20 import tornado.web
21 import tornado.httpserver
22 import tornado.log
23 from kfserving.handlers.http import PredictHandler, ExplainHandler
24 from kfserving import KFModel
25
26 DEFAULT_HTTP_PORT = 8080
27 DEFAULT_GRPC_PORT = 8081
28 DEFAULT_MAX_BUFFER_SIZE = 104857600
29
30 parser = argparse.ArgumentParser(add_help=False)
31 parser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int,
32 help='The HTTP Port listened to by the model server.')
33 parser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int,
34 help='The GRPC Port listened to by the model server.')
35 parser.add_argument('--max_buffer_size', default=DEFAULT_MAX_BUFFER_SIZE, type=int,
36 help='The max buffer size for tornado.')
37 parser.add_argument('--workers', default=0, type=int,
38 help='The number of works to fork')
39 args, _ = parser.parse_known_args()
40
41 tornado.log.enable_pretty_logging()
42
43 class KFServer:
44 def __init__(self, http_port: int = args.http_port,
45 grpc_port: int = args.grpc_port,
46 max_buffer_size: int = args.max_buffer_size,
47 workers: int = args.workers):
48 self.registered_models = {}
49 self.http_port = http_port
50 self.grpc_port = grpc_port
51 self.max_buffer_size = max_buffer_size
52 self.workers = workers
53 self._http_server = None
54
55 def create_application(self):
56 return tornado.web.Application([
57 # Server Liveness API returns 200 if server is alive.
58 (r"/", LivenessHandler),
59 (r"/v1/models",
60 ListHandler, dict(models=self.registered_models)),
61 # Model Health API returns 200 if model is ready to serve.
62 (r"/v1/models/([a-zA-Z0-9_-]+)",
63 HealthHandler, dict(models=self.registered_models)),
64 (r"/v1/models/([a-zA-Z0-9_-]+):predict",
65 PredictHandler, dict(models=self.registered_models)),
66 (r"/v1/models/([a-zA-Z0-9_-]+):explain",
67 ExplainHandler, dict(models=self.registered_models)),
68 ])
69
70 def start(self, models: List[KFModel]):
71 for model in models:
72 self.register_model(model)
73
74 self._http_server = tornado.httpserver.HTTPServer(
75 self.create_application(), max_buffer_size=self.max_buffer_size)
76
77 logging.info("Listening on port %s", self.http_port)
78 self._http_server.bind(self.http_port)
79 logging.info("Will fork %d workers", self.workers)
80 self._http_server.start(self.workers)
81 tornado.ioloop.IOLoop.current().start()
82
83 def register_model(self, model: KFModel):
84 if not model.name:
85 raise Exception(
86 "Failed to register model, model.name must be provided.")
87 self.registered_models[model.name] = model
88 logging.info("Registering model: %s", model.name)
89
90
91 class LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods
92 def get(self):
93 self.write("Alive")
94
95
96 class HealthHandler(tornado.web.RequestHandler):
97 def initialize(self, models: Dict[str, KFModel]):
98 self.models = models # pylint:disable=attribute-defined-outside-init
99
100 def get(self, name: str):
101 if name not in self.models:
102 raise tornado.web.HTTPError(
103 status_code=404,
104 reason="Model with name %s does not exist." % name
105 )
106
107 model = self.models[name]
108 self.write(json.dumps({
109 "name": model.name,
110 "ready": model.ready
111 }))
112
113
114 class ListHandler(tornado.web.RequestHandler):
115 def initialize(self, models: Dict[str, KFModel]):
116 self.models = models # pylint:disable=attribute-defined-outside-init
117
118 def get(self):
119 self.write(json.dumps(list(self.models.values())))
120
[end of python/kfserving/kfserving/kfserver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/kfserving/kfserving/kfserver.py b/python/kfserving/kfserving/kfserver.py
--- a/python/kfserving/kfserving/kfserver.py
+++ b/python/kfserving/kfserving/kfserver.py
@@ -105,6 +105,12 @@
)
model = self.models[name]
+ if not model.ready:
+ raise tornado.web.HTTPError(
+ status_code=503,
+ reason="Model with name %s is not ready." % name
+ )
+
self.write(json.dumps({
"name": model.name,
"ready": model.ready
| {"golden_diff": "diff --git a/python/kfserving/kfserving/kfserver.py b/python/kfserving/kfserving/kfserver.py\n--- a/python/kfserving/kfserving/kfserver.py\n+++ b/python/kfserving/kfserving/kfserver.py\n@@ -105,6 +105,12 @@\n )\n \n model = self.models[name]\n+ if not model.ready:\n+ raise tornado.web.HTTPError(\n+ status_code=503,\n+ reason=\"Model with name %s is not ready.\" % name\n+ )\n+\n self.write(json.dumps({\n \"name\": model.name,\n \"ready\": model.ready\n", "issue": "kubernetes probes using HealthHandler endpoint succeed when models are not ready\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nUsed the [HealthHandler endpoint](https://github.com/kubeflow/kfserving/blob/92c5f3c9df662c853beef86bddf7f30ab5d8b996/python/kfserving/kfserving/kfserver.py#L62-L63) to set up a Kubernetes readiness probe. The probe succeeded even though the model was not ready. \r\n\r\n**What did you expect to happen:**\r\nReadiness probe failure when model is not ready. \r\n\r\n**Anything else you would like to add:**\r\n[Kubernetes probe documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) says `Any code greater than or equal to 200 and less than 400 indicates success. Any other code indicates failure`, which seems to imply that the [response contents](https://github.com/kubeflow/kfserving/blob/92c5f3c9df662c853beef86bddf7f30ab5d8b996/python/kfserving/kfserving/kfserver.py#L108-L111) (which include the model's `.ready` property) are ignored. \r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KFServing Version:\r\n- Kubeflow version:\r\n- Minikube version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport json\nfrom typing import List, Dict\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.log\nfrom kfserving.handlers.http import PredictHandler, ExplainHandler\nfrom kfserving import KFModel\n\nDEFAULT_HTTP_PORT = 8080\nDEFAULT_GRPC_PORT = 8081\nDEFAULT_MAX_BUFFER_SIZE = 104857600\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int,\n help='The HTTP Port listened to by the model server.')\nparser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int,\n help='The GRPC Port listened to by the model server.')\nparser.add_argument('--max_buffer_size', default=DEFAULT_MAX_BUFFER_SIZE, type=int,\n help='The max buffer size for tornado.')\nparser.add_argument('--workers', default=0, type=int,\n help='The number of works to fork')\nargs, _ = parser.parse_known_args()\n\ntornado.log.enable_pretty_logging()\n\nclass KFServer:\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n max_buffer_size: int = args.max_buffer_size,\n workers: int = 
args.workers):\n self.registered_models = {}\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.max_buffer_size = max_buffer_size\n self.workers = workers\n self._http_server = None\n\n def create_application(self):\n return tornado.web.Application([\n # Server Liveness API returns 200 if server is alive.\n (r\"/\", LivenessHandler),\n (r\"/v1/models\",\n ListHandler, dict(models=self.registered_models)),\n # Model Health API returns 200 if model is ready to serve.\n (r\"/v1/models/([a-zA-Z0-9_-]+)\",\n HealthHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):predict\",\n PredictHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):explain\",\n ExplainHandler, dict(models=self.registered_models)),\n ])\n\n def start(self, models: List[KFModel]):\n for model in models:\n self.register_model(model)\n\n self._http_server = tornado.httpserver.HTTPServer(\n self.create_application(), max_buffer_size=self.max_buffer_size)\n\n logging.info(\"Listening on port %s\", self.http_port)\n self._http_server.bind(self.http_port)\n logging.info(\"Will fork %d workers\", self.workers)\n self._http_server.start(self.workers)\n tornado.ioloop.IOLoop.current().start()\n\n def register_model(self, model: KFModel):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models[model.name] = model\n logging.info(\"Registering model: %s\", model.name)\n\n\nclass LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods\n def get(self):\n self.write(\"Alive\")\n\n\nclass HealthHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self, name: str):\n if name not in self.models:\n raise tornado.web.HTTPError(\n status_code=404,\n reason=\"Model with name %s does not exist.\" % name\n )\n\n model = self.models[name]\n self.write(json.dumps({\n \"name\": model.name,\n \"ready\": model.ready\n }))\n\n\nclass ListHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self):\n self.write(json.dumps(list(self.models.values())))\n", "path": "python/kfserving/kfserving/kfserver.py"}]} | 2,180 | 145 |
gh_patches_debug_34152 | rasdani/github-patches | git_diff | fossasia__open-event-server-4040 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: sentry cannot catch internal server errors
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-orga-server
**Current behavior:**
<!-- Describe how the bug manifests. -->
Sentry cannot catch internal server errors, because the API framework catches each and every exception.
**Expected behavior:**
<!-- Describe what the behavior would be without the bug. -->
Based on a config variable, the framework should not catch exceptions; instead, the app should handle them and return a proper error response.
</issue>
<code>
[start of config.py]
1 # -*- coding: utf-8 -*-
2 import os
3 from envparse import env
4
5 env.read_envfile()
6
7 basedir = os.path.abspath(os.path.dirname(__file__))
8
9 VERSION_NAME = '2.0.0-alpha.1'
10
11 LANGUAGES = {
12 'en': 'English',
13 'bn': 'Bengali/Bangla',
14 'zh_Hans': 'Chinese (Simplified)',
15 'zh_Hant': 'Chinese (Traditional)',
16 'fr': 'French',
17 'de': 'German',
18 'id': 'Indonesian',
19 'ko': 'Korean',
20 'pl': 'Polish',
21 'es': 'Spanish',
22 'th': 'Thai',
23 'vi': 'Vietnamese',
24 'hi': 'Hindi',
25 'ja': 'Japanese',
26 'ru': 'Russian',
27 }
28
29
30 class Config(object):
31 """
32 The base configuration option. Contains the defaults.
33 """
34
35 DEBUG = False
36
37 DEVELOPMENT = False
38 STAGING = False
39 PRODUCTION = False
40 TESTING = False
41
42 CACHING = False
43 PROFILE = False
44 SQLALCHEMY_RECORD_QUERIES = False
45
46 FLASK_ADMIN_SWATCH = 'lumen'
47
48 VERSION = VERSION_NAME
49 SQLALCHEMY_TRACK_MODIFICATIONS = True
50 ERROR_404_HELP = False
51 CSRF_ENABLED = True
52 SERVER_NAME = env('SERVER_NAME', default=None)
53 CORS_HEADERS = 'Content-Type'
54 SQLALCHEMY_DATABASE_URI = env('DATABASE_URL', default=None)
55 SERVE_STATIC = env.bool('SERVE_STATIC', default=False)
56 DATABASE_QUERY_TIMEOUT = 0.1
57 SOFT_DELETE = True
58 PROPOGATE_ERROR = env.bool('PROPOGATE_ERROR', default=False)
59 DASHERIZE_API = True
60 ETAG = True
61 SENTRY_DSN = env('SENTRY_DSN', default=None)
62
63 if not SQLALCHEMY_DATABASE_URI:
64 print('`DATABASE_URL` either not exported or empty')
65 exit()
66
67 BASE_DIR = basedir
68 FORCE_SSL = os.getenv('FORCE_SSL', 'no') == 'yes'
69
70 if SERVE_STATIC:
71 UPLOADS_FOLDER = BASE_DIR + '/static/uploads/'
72 TEMP_UPLOADS_FOLDER = BASE_DIR + '/static/uploads/temp/'
73 UPLOAD_FOLDER = UPLOADS_FOLDER
74 STATIC_URL = '/static/'
75 STATIC_ROOT = 'staticfiles'
76 STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
77
78 if FORCE_SSL:
79 PREFERRED_URL_SCHEME = 'https'
80
81
82 class ProductionConfig(Config):
83 """
84 The configuration for a production environment
85 """
86
87 MINIFY_PAGE = True
88 PRODUCTION = True
89 CACHING = True
90
91 # if force on
92
93
94 class StagingConfig(ProductionConfig):
95 """
96 The configuration for a staging environment
97 """
98
99 PRODUCTION = False
100 STAGING = True
101
102
103 class DevelopmentConfig(Config):
104 """
105 The configuration for a development environment
106 """
107
108 DEVELOPMENT = True
109 DEBUG = True
110 CACHING = True
111 PROPOGATE_ERROR = True
112
113 # Test database performance
114 SQLALCHEMY_RECORD_QUERIES = True
115
116
117 class TestingConfig(Config):
118 """
119 The configuration for a test suit
120 """
121 TESTING = True
122 CELERY_ALWAYS_EAGER = True
123 CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
124 SQLALCHEMY_RECORD_QUERIES = True
125 DEBUG_TB_ENABLED = False
126 BROKER_BACKEND = 'memory'
127 SQLALCHEMY_DATABASE_URI = env('TEST_DATABASE_URL', default=None)
128 PROPOGATE_ERROR = True
129
[end of config.py]
[start of app/__init__.py]
1 # Ignore ExtDeprecationWarnings for Flask 0.11 - see http://stackoverflow.com/a/38080580
2 import warnings
3 from flask.exthook import ExtDeprecationWarning
4
5 warnings.simplefilter('ignore', ExtDeprecationWarning)
6 # Keep it before flask extensions are imported
7
8 from pytz import utc
9
10 from app.helpers.scheduled_jobs import send_mail_to_expired_orders, empty_trash, send_after_event_mail, \
11 send_event_fee_notification, send_event_fee_notification_followup
12
13 from celery import Celery
14 from celery.signals import after_task_publish
15 import logging
16 import os.path
17 from os import environ
18 from envparse import env
19 import sys
20 from flask import Flask
21 from app.settings import get_settings, get_setts
22 from flask_migrate import Migrate, MigrateCommand
23 from flask_script import Manager
24 from flask_login import current_user
25 from flask_jwt import JWT
26 from datetime import timedelta
27 from flask_cors import CORS
28 from raven.contrib.flask import Sentry
29
30 import sqlalchemy as sa
31
32 import stripe
33 from app.settings import get_settings
34 from app.helpers.flask_ext.helpers import SilentUndefined, camel_case, slugify, MiniJSONEncoder
35 from app.helpers.payment import forex
36 from app.models import db
37 from app.models.user import User
38 from app.models.event import Event
39 from app.models.session import Session
40 from app.api.helpers.jwt import jwt_authenticate, jwt_identity
41 from helpers.formatter import operation_name
42 from app.helpers.data_getter import DataGetter
43 from app.helpers.flask_ext.errors import NotFoundError, PermissionDeniedError, ServerError, ValidationError
44 from apscheduler.schedulers.background import BackgroundScheduler
45 from app.helpers.data import DataManager, delete_from_db
46 from app.helpers.helpers import send_after_event
47 from app.helpers.cache import cache
48 from helpers.helpers import send_email_for_expired_orders
49 from werkzeug.contrib.profiler import ProfilerMiddleware
50 from app.views import BlueprintsManager
51 from app.helpers.auth import AuthManager
52
53
54 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
55
56 app = Flask(__name__)
57
58 env.read_envfile()
59
60
61 class ReverseProxied(object):
62 """
63 ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel
64 """
65
66 def __init__(self, app):
67 self.app = app
68
69 def __call__(self, environ, start_response):
70 scheme = environ.get('HTTP_X_FORWARDED_PROTO')
71 if scheme:
72 environ['wsgi.url_scheme'] = scheme
73 if os.getenv('FORCE_SSL', 'no') == 'yes':
74 environ['wsgi.url_scheme'] = 'https'
75 return self.app(environ, start_response)
76
77
78 app.wsgi_app = ReverseProxied(app.wsgi_app)
79
80
81 def create_app():
82 BlueprintsManager.register(app)
83 Migrate(app, db)
84
85 app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))
86 db.init_app(app)
87 _manager = Manager(app)
88 _manager.add_command('db', MigrateCommand)
89
90 if app.config['CACHING']:
91 cache.init_app(app, config={'CACHE_TYPE': 'simple'})
92 else:
93 cache.init_app(app, config={'CACHE_TYPE': 'null'})
94
95 stripe.api_key = 'SomeStripeKey'
96 app.secret_key = 'super secret key'
97 app.json_encoder = MiniJSONEncoder
98 app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
99 app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'
100
101 app.logger.addHandler(logging.StreamHandler(sys.stdout))
102 app.logger.setLevel(logging.ERROR)
103 app.jinja_env.add_extension('jinja2.ext.do')
104 app.jinja_env.add_extension('jinja2.ext.loopcontrols')
105 app.jinja_env.undefined = SilentUndefined
106 app.jinja_env.filters['operation_name'] = operation_name
107
108 # set up jwt
109 app.config['JWT_AUTH_USERNAME_KEY'] = 'email'
110 app.config['JWT_EXPIRATION_DELTA'] = timedelta(seconds=24 * 60 * 60)
111 app.config['JWT_AUTH_URL_RULE'] = '/auth/session'
112 _jwt = JWT(app, jwt_authenticate, jwt_identity)
113
114 # setup celery
115 app.config['CELERY_BROKER_URL'] = environ.get('REDIS_URL', 'redis://localhost:6379/0')
116 app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']
117
118 CORS(app, resources={r"/*": {"origins": "*"}})
119 AuthManager.init_login(app)
120
121 if app.config['TESTING'] and app.config['PROFILE']:
122 # Profiling
123 app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
124
125 # nextgen api
126 with app.app_context():
127 from app.api.bootstrap import api_v1
128 from app.api.uploads import upload_routes
129 app.register_blueprint(api_v1)
130 app.register_blueprint(upload_routes)
131
132 sa.orm.configure_mappers()
133
134 if app.config['SERVE_STATIC']:
135 app.add_url_rule('/static/<path:filename>',
136 endpoint='static',
137 view_func=app.send_static_file)
138
139 # sentry
140 if app.config['SENTRY_DSN']:
141 sentry = Sentry(dsn=app.config['SENTRY_DSN'])
142 sentry.init_app(app)
143
144 return app, _manager, db, _jwt
145
146
147 current_app, manager, database, jwt = create_app()
148
149
150 # http://stackoverflow.com/questions/26724623/
151 @app.before_request
152 def track_user():
153 if current_user.is_authenticated:
154 current_user.update_lat()
155
156
157 def make_celery(app):
158 celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
159 celery.conf.update(app.config)
160 task_base = celery.Task
161
162 class ContextTask(task_base):
163 abstract = True
164
165 def __call__(self, *args, **kwargs):
166 if current_app.config['TESTING']:
167 with app.test_request_context():
168 return task_base.__call__(self, *args, **kwargs)
169 with app.app_context():
170 return task_base.__call__(self, *args, **kwargs)
171
172 celery.Task = ContextTask
173 return celery
174
175
176 celery = make_celery(current_app)
177
178
179 # http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists
180 @after_task_publish.connect
181 def update_sent_state(sender=None, body=None, **kwargs):
182 # the task may not exist if sent using `send_task` which
183 # sends tasks by name, so fall back to the default result backend
184 # if that is the case.
185 task = celery.tasks.get(sender)
186 backend = task.backend if task else celery.backend
187 backend.store_result(body['id'], None, 'WAITING')
188
189
190 # register celery tasks. removing them will cause the tasks to not function. so don't remove them
191 # it is important to register them after celery is defined to resolve circular imports
192
193 #import api.helpers.tasks
194 import helpers.tasks
195
196
197 scheduler = BackgroundScheduler(timezone=utc)
198 scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)
199 scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)
200 scheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)
201 scheduler.add_job(send_event_fee_notification, 'cron', day=1)
202 scheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)
203 scheduler.start()
204
205 if __name__ == '__main__':
206 current_app.run()
207
[end of app/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/__init__.py b/app/__init__.py
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -17,7 +17,7 @@
from os import environ
from envparse import env
import sys
-from flask import Flask
+from flask import Flask, json, make_response
from app.settings import get_settings, get_setts
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
@@ -26,6 +26,8 @@
from datetime import timedelta
from flask_cors import CORS
from raven.contrib.flask import Sentry
+from flask_rest_jsonapi.errors import jsonapi_errors
+from flask_rest_jsonapi.exceptions import JsonApiException
import sqlalchemy as sa
@@ -202,5 +204,16 @@
scheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)
scheduler.start()
+
[email protected](500)
+def internal_server_error(error):
+ if current_app.config['PROPOGATE_ERROR'] is True:
+ exc = JsonApiException({'pointer': ''}, str(error))
+ else:
+ exc = JsonApiException({'pointer': ''}, 'Unknown error')
+ return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,
+ {'Content-Type': 'application/vnd.api+json'})
+
+
if __name__ == '__main__':
current_app.run()
diff --git a/config.py b/config.py
--- a/config.py
+++ b/config.py
@@ -54,11 +54,14 @@
SQLALCHEMY_DATABASE_URI = env('DATABASE_URL', default=None)
SERVE_STATIC = env.bool('SERVE_STATIC', default=False)
DATABASE_QUERY_TIMEOUT = 0.1
+ SENTRY_DSN = env('SENTRY_DSN', default=None)
+
+ # API configs
SOFT_DELETE = True
PROPOGATE_ERROR = env.bool('PROPOGATE_ERROR', default=False)
DASHERIZE_API = True
+ API_PROPOGATE_UNCAUGHT_EXCEPTIONS = env.bool('API_PROPOGATE_UNCAUGHT_EXCEPTIONS', default=True)
ETAG = True
- SENTRY_DSN = env('SENTRY_DSN', default=None)
if not SQLALCHEMY_DATABASE_URI:
print('`DATABASE_URL` either not exported or empty')
| {"golden_diff": "diff --git a/app/__init__.py b/app/__init__.py\n--- a/app/__init__.py\n+++ b/app/__init__.py\n@@ -17,7 +17,7 @@\n from os import environ\n from envparse import env\n import sys\n-from flask import Flask\n+from flask import Flask, json, make_response\n from app.settings import get_settings, get_setts\n from flask_migrate import Migrate, MigrateCommand\n from flask_script import Manager\n@@ -26,6 +26,8 @@\n from datetime import timedelta\n from flask_cors import CORS\n from raven.contrib.flask import Sentry\n+from flask_rest_jsonapi.errors import jsonapi_errors\n+from flask_rest_jsonapi.exceptions import JsonApiException\n \n import sqlalchemy as sa\n \n@@ -202,5 +204,16 @@\n scheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)\n scheduler.start()\n \n+\[email protected](500)\n+def internal_server_error(error):\n+ if current_app.config['PROPOGATE_ERROR'] is True:\n+ exc = JsonApiException({'pointer': ''}, str(error))\n+ else:\n+ exc = JsonApiException({'pointer': ''}, 'Unknown error')\n+ return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,\n+ {'Content-Type': 'application/vnd.api+json'})\n+\n+\n if __name__ == '__main__':\n current_app.run()\ndiff --git a/config.py b/config.py\n--- a/config.py\n+++ b/config.py\n@@ -54,11 +54,14 @@\n SQLALCHEMY_DATABASE_URI = env('DATABASE_URL', default=None)\n SERVE_STATIC = env.bool('SERVE_STATIC', default=False)\n DATABASE_QUERY_TIMEOUT = 0.1\n+ SENTRY_DSN = env('SENTRY_DSN', default=None)\n+\n+ # API configs\n SOFT_DELETE = True\n PROPOGATE_ERROR = env.bool('PROPOGATE_ERROR', default=False)\n DASHERIZE_API = True\n+ API_PROPOGATE_UNCAUGHT_EXCEPTIONS = env.bool('API_PROPOGATE_UNCAUGHT_EXCEPTIONS', default=True)\n ETAG = True\n- SENTRY_DSN = env('SENTRY_DSN', default=None)\n \n if not SQLALCHEMY_DATABASE_URI:\n print('`DATABASE_URL` either not exported or empty')\n", "issue": "Bug: sentry cannot catch internal server errors\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\n**Current behavior:**\r\n<!-- Describe how the bug manifests. -->\r\nsentry cannot catch internal server errors, because api frameworks catches each and every exception\r\n\r\n**Expected behavior:**\r\n<!-- Describe what the behavior would be without the bug. -->\r\nBased on config variable, framework should not catch exceptions and instead app should handle them and return proper error response\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nfrom envparse import env\n\nenv.read_envfile()\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nVERSION_NAME = '2.0.0-alpha.1'\n\nLANGUAGES = {\n 'en': 'English',\n 'bn': 'Bengali/Bangla',\n 'zh_Hans': 'Chinese (Simplified)',\n 'zh_Hant': 'Chinese (Traditional)',\n 'fr': 'French',\n 'de': 'German',\n 'id': 'Indonesian',\n 'ko': 'Korean',\n 'pl': 'Polish',\n 'es': 'Spanish',\n 'th': 'Thai',\n 'vi': 'Vietnamese',\n 'hi': 'Hindi',\n 'ja': 'Japanese',\n 'ru': 'Russian',\n}\n\n\nclass Config(object):\n \"\"\"\n The base configuration option. 
Contains the defaults.\n \"\"\"\n\n DEBUG = False\n\n DEVELOPMENT = False\n STAGING = False\n PRODUCTION = False\n TESTING = False\n\n CACHING = False\n PROFILE = False\n SQLALCHEMY_RECORD_QUERIES = False\n\n FLASK_ADMIN_SWATCH = 'lumen'\n\n VERSION = VERSION_NAME\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n ERROR_404_HELP = False\n CSRF_ENABLED = True\n SERVER_NAME = env('SERVER_NAME', default=None)\n CORS_HEADERS = 'Content-Type'\n SQLALCHEMY_DATABASE_URI = env('DATABASE_URL', default=None)\n SERVE_STATIC = env.bool('SERVE_STATIC', default=False)\n DATABASE_QUERY_TIMEOUT = 0.1\n SOFT_DELETE = True\n PROPOGATE_ERROR = env.bool('PROPOGATE_ERROR', default=False)\n DASHERIZE_API = True\n ETAG = True\n SENTRY_DSN = env('SENTRY_DSN', default=None)\n\n if not SQLALCHEMY_DATABASE_URI:\n print('`DATABASE_URL` either not exported or empty')\n exit()\n\n BASE_DIR = basedir\n FORCE_SSL = os.getenv('FORCE_SSL', 'no') == 'yes'\n\n if SERVE_STATIC:\n UPLOADS_FOLDER = BASE_DIR + '/static/uploads/'\n TEMP_UPLOADS_FOLDER = BASE_DIR + '/static/uploads/temp/'\n UPLOAD_FOLDER = UPLOADS_FOLDER\n STATIC_URL = '/static/'\n STATIC_ROOT = 'staticfiles'\n STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)\n\n if FORCE_SSL:\n PREFERRED_URL_SCHEME = 'https'\n\n\nclass ProductionConfig(Config):\n \"\"\"\n The configuration for a production environment\n \"\"\"\n\n MINIFY_PAGE = True\n PRODUCTION = True\n CACHING = True\n\n # if force on\n\n\nclass StagingConfig(ProductionConfig):\n \"\"\"\n The configuration for a staging environment\n \"\"\"\n\n PRODUCTION = False\n STAGING = True\n\n\nclass DevelopmentConfig(Config):\n \"\"\"\n The configuration for a development environment\n \"\"\"\n\n DEVELOPMENT = True\n DEBUG = True\n CACHING = True\n PROPOGATE_ERROR = True\n\n # Test database performance\n SQLALCHEMY_RECORD_QUERIES = True\n\n\nclass TestingConfig(Config):\n \"\"\"\n The configuration for a test suit\n \"\"\"\n TESTING = True\n CELERY_ALWAYS_EAGER = True\n CELERY_EAGER_PROPAGATES_EXCEPTIONS = True\n SQLALCHEMY_RECORD_QUERIES = True\n DEBUG_TB_ENABLED = False\n BROKER_BACKEND = 'memory'\n SQLALCHEMY_DATABASE_URI = env('TEST_DATABASE_URL', default=None)\n PROPOGATE_ERROR = True\n", "path": "config.py"}, {"content": "# Ignore ExtDeprecationWarnings for Flask 0.11 - see http://stackoverflow.com/a/38080580\nimport warnings\nfrom flask.exthook import ExtDeprecationWarning\n\nwarnings.simplefilter('ignore', ExtDeprecationWarning)\n# Keep it before flask extensions are imported\n\nfrom pytz import utc\n\nfrom app.helpers.scheduled_jobs import send_mail_to_expired_orders, empty_trash, send_after_event_mail, \\\n send_event_fee_notification, send_event_fee_notification_followup\n\nfrom celery import Celery\nfrom celery.signals import after_task_publish\nimport logging\nimport os.path\nfrom os import environ\nfrom envparse import env\nimport sys\nfrom flask import Flask\nfrom app.settings import get_settings, get_setts\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom flask_login import current_user\nfrom flask_jwt import JWT\nfrom datetime import timedelta\nfrom flask_cors import CORS\nfrom raven.contrib.flask import Sentry\n\nimport sqlalchemy as sa\n\nimport stripe\nfrom app.settings import get_settings\nfrom app.helpers.flask_ext.helpers import SilentUndefined, camel_case, slugify, MiniJSONEncoder\nfrom app.helpers.payment import forex\nfrom app.models import db\nfrom app.models.user import User\nfrom app.models.event import Event\nfrom app.models.session import Session\nfrom 
app.api.helpers.jwt import jwt_authenticate, jwt_identity\nfrom helpers.formatter import operation_name\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.flask_ext.errors import NotFoundError, PermissionDeniedError, ServerError, ValidationError\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom app.helpers.data import DataManager, delete_from_db\nfrom app.helpers.helpers import send_after_event\nfrom app.helpers.cache import cache\nfrom helpers.helpers import send_email_for_expired_orders\nfrom werkzeug.contrib.profiler import ProfilerMiddleware\nfrom app.views import BlueprintsManager\nfrom app.helpers.auth import AuthManager\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\napp = Flask(__name__)\n\nenv.read_envfile()\n\n\nclass ReverseProxied(object):\n \"\"\"\n ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n scheme = environ.get('HTTP_X_FORWARDED_PROTO')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n if os.getenv('FORCE_SSL', 'no') == 'yes':\n environ['wsgi.url_scheme'] = 'https'\n return self.app(environ, start_response)\n\n\napp.wsgi_app = ReverseProxied(app.wsgi_app)\n\n\ndef create_app():\n BlueprintsManager.register(app)\n Migrate(app, db)\n\n app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))\n db.init_app(app)\n _manager = Manager(app)\n _manager.add_command('db', MigrateCommand)\n\n if app.config['CACHING']:\n cache.init_app(app, config={'CACHE_TYPE': 'simple'})\n else:\n cache.init_app(app, config={'CACHE_TYPE': 'null'})\n\n stripe.api_key = 'SomeStripeKey'\n app.secret_key = 'super secret key'\n app.json_encoder = MiniJSONEncoder\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'\n\n app.logger.addHandler(logging.StreamHandler(sys.stdout))\n app.logger.setLevel(logging.ERROR)\n app.jinja_env.add_extension('jinja2.ext.do')\n app.jinja_env.add_extension('jinja2.ext.loopcontrols')\n app.jinja_env.undefined = SilentUndefined\n app.jinja_env.filters['operation_name'] = operation_name\n\n # set up jwt\n app.config['JWT_AUTH_USERNAME_KEY'] = 'email'\n app.config['JWT_EXPIRATION_DELTA'] = timedelta(seconds=24 * 60 * 60)\n app.config['JWT_AUTH_URL_RULE'] = '/auth/session'\n _jwt = JWT(app, jwt_authenticate, jwt_identity)\n\n # setup celery\n app.config['CELERY_BROKER_URL'] = environ.get('REDIS_URL', 'redis://localhost:6379/0')\n app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']\n\n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n AuthManager.init_login(app)\n\n if app.config['TESTING'] and app.config['PROFILE']:\n # Profiling\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n # nextgen api\n with app.app_context():\n from app.api.bootstrap import api_v1\n from app.api.uploads import upload_routes\n app.register_blueprint(api_v1)\n app.register_blueprint(upload_routes)\n\n sa.orm.configure_mappers()\n\n if app.config['SERVE_STATIC']:\n app.add_url_rule('/static/<path:filename>',\n endpoint='static',\n view_func=app.send_static_file)\n\n # sentry\n if app.config['SENTRY_DSN']:\n sentry = Sentry(dsn=app.config['SENTRY_DSN'])\n sentry.init_app(app)\n\n return app, _manager, db, _jwt\n\n\ncurrent_app, manager, database, jwt = create_app()\n\n\n# http://stackoverflow.com/questions/26724623/\[email protected]_request\ndef track_user():\n if 
current_user.is_authenticated:\n current_user.update_lat()\n\n\ndef make_celery(app):\n celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])\n celery.conf.update(app.config)\n task_base = celery.Task\n\n class ContextTask(task_base):\n abstract = True\n\n def __call__(self, *args, **kwargs):\n if current_app.config['TESTING']:\n with app.test_request_context():\n return task_base.__call__(self, *args, **kwargs)\n with app.app_context():\n return task_base.__call__(self, *args, **kwargs)\n\n celery.Task = ContextTask\n return celery\n\n\ncelery = make_celery(current_app)\n\n\n# http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists\n@after_task_publish.connect\ndef update_sent_state(sender=None, body=None, **kwargs):\n # the task may not exist if sent using `send_task` which\n # sends tasks by name, so fall back to the default result backend\n # if that is the case.\n task = celery.tasks.get(sender)\n backend = task.backend if task else celery.backend\n backend.store_result(body['id'], None, 'WAITING')\n\n\n# register celery tasks. removing them will cause the tasks to not function. so don't remove them\n# it is important to register them after celery is defined to resolve circular imports\n\n#import api.helpers.tasks\nimport helpers.tasks\n\n\nscheduler = BackgroundScheduler(timezone=utc)\nscheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)\nscheduler.add_job(empty_trash, 'cron', hour=5, minute=30)\nscheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)\nscheduler.add_job(send_event_fee_notification, 'cron', day=1)\nscheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)\nscheduler.start()\n\nif __name__ == '__main__':\n current_app.run()\n", "path": "app/__init__.py"}]} | 3,932 | 525 |
gh_patches_debug_32091 | rasdani/github-patches | git_diff | getnikola__nikola-3400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
How can Nikola be made to process WebP images?
I would like to use WebP pictures throughout my site, but when I build it, the output reports:
```
. scale_images:output\images\gm-blur-shown-grid.webp
[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\gm-blur-shown-grid.webp, using original image! (a bytes-like object is required, not 'NoneType')
[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\gm-blur-shown-grid.webp, using original image! (a bytes-like object is required, not 'NoneType')
. scale_images:output\images\gm-blur-shown.webp
[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\gm-blur-shown.webp, using original image! (a bytes-like object is required, not 'NoneType')
[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\gm-blur-shown.webp, using original image! (a bytes-like object is required, not 'NoneType')
```
The raw WebP pictures were copied into my `output` directory, but no thumbnails were generated for them.
I then searched conf.py, Google, and the GitHub issues, but didn't find anything describing how to enable this.
Is there any way to use WebP in a Nikola site?
</issue>
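As an aside on feasibility: Pillow already reads and writes WebP when it is built against libwebp, and the patch rendered later in this record simply adds `.webp` to `image_ext_list_builtin` and only passes `icc_profile`/`exif` to `save()` when they are actually present (which is also the likely source of the "a bytes-like object is required, not 'NoneType'" warning). Below is a minimal, hypothetical sketch of the thumbnail step for a WebP file; the file names and the 400 px bound are made up for illustration.

```python
from PIL import Image

# Hypothetical paths and size; requires a Pillow build with libwebp support.
src, dst, bound = "images/photo.webp", "output/photo.thumbnail.webp", 400

im = Image.open(src)          # Pillow's WebP plugin handles decoding
im.thumbnail((bound, bound))  # shrink in place, preserving aspect ratio
save_args = {}
icc = im.info.get("icc_profile")
if icc:                       # only forward the ICC profile if it actually exists
    save_args["icc_profile"] = icc
im.save(dst, **save_args)     # encoder is chosen from the .webp extension
```

If Pillow was built without libwebp, `Image.open` would typically raise `UnidentifiedImageError`, so the extension-list change alone would not be enough on such installs.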
<code>
[start of nikola/image_processing.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Process images."""
28
29 import datetime
30 import gzip
31 import os
32 import re
33
34 import lxml
35 import piexif
36 from PIL import ExifTags, Image
37
38 from nikola import utils
39
40 EXIF_TAG_NAMES = {}
41
42
43 class ImageProcessor(object):
44 """Apply image operations."""
45
46 image_ext_list_builtin = ['.jpg', '.png', '.jpeg', '.gif', '.svg', '.svgz', '.bmp', '.tiff']
47
48 def _fill_exif_tag_names(self):
49 """Connect EXIF tag names to numeric values."""
50 if not EXIF_TAG_NAMES:
51 for ifd in piexif.TAGS:
52 for tag, data in piexif.TAGS[ifd].items():
53 EXIF_TAG_NAMES[tag] = data['name']
54
55 def filter_exif(self, exif, whitelist):
56 """Filter EXIF data as described in the documentation."""
57 # Scenario 1: keep everything
58 if whitelist == {'*': '*'}:
59 return exif
60
61 # Scenario 2: keep nothing
62 if whitelist == {}:
63 return None
64
65 # Scenario 3: keep some
66 self._fill_exif_tag_names()
67 exif = exif.copy() # Don't modify in-place, it's rude
68 for k in list(exif.keys()):
69 if type(exif[k]) != dict:
70 pass # At least thumbnails have no fields
71 elif k not in whitelist:
72 exif.pop(k) # Not whitelisted, remove
73 elif k in whitelist and whitelist[k] == '*':
74 # Fully whitelisted, keep all
75 pass
76 else:
77 # Partially whitelisted
78 for tag in list(exif[k].keys()):
79 if EXIF_TAG_NAMES[tag] not in whitelist[k]:
80 exif[k].pop(tag)
81
82 return exif or None
83
84 def resize_image(self, src, dst=None, max_size=None, bigger_panoramas=True, preserve_exif_data=False, exif_whitelist={}, preserve_icc_profiles=False, dst_paths=None, max_sizes=None):
85 """Make a copy of the image in the requested size(s).
86
87 max_sizes should be a list of sizes, and the image would be resized to fit in a
88 square of each size (preserving aspect ratio).
89
90 dst_paths is a list of the destination paths, and should be the same length as max_sizes.
91
92 Backwards compatibility:
93
94 * If max_sizes is None, it's set to [max_size]
95 * If dst_paths is None, it's set to [dst]
96 * Either max_size or max_sizes should be set
97 * Either dst or dst_paths should be set
98 """
99 if dst_paths is None:
100 dst_paths = [dst]
101 if max_sizes is None:
102 max_sizes = [max_size]
103 if len(max_sizes) != len(dst_paths):
104 raise ValueError('resize_image called with incompatible arguments: {} / {}'.format(dst_paths, max_sizes))
105 extension = os.path.splitext(src)[1].lower()
106 if extension in {'.svg', '.svgz'}:
107 self.resize_svg(src, dst_paths, max_sizes, bigger_panoramas)
108 return
109
110 _im = Image.open(src)
111
112 # The jpg exclusion is Issue #3332
113 is_animated = hasattr(_im, 'n_frames') and _im.n_frames > 1 and extension not in {'.jpg', '.jpeg'}
114
115 exif = None
116 if "exif" in _im.info:
117 exif = piexif.load(_im.info["exif"])
118 # Rotate according to EXIF
119 if "0th" in exif:
120 value = exif['0th'].get(piexif.ImageIFD.Orientation, 1)
121 if value in (3, 4):
122 _im = _im.transpose(Image.ROTATE_180)
123 elif value in (5, 6):
124 _im = _im.transpose(Image.ROTATE_270)
125 elif value in (7, 8):
126 _im = _im.transpose(Image.ROTATE_90)
127 if value in (2, 4, 5, 7):
128 _im = _im.transpose(Image.FLIP_LEFT_RIGHT)
129 exif['0th'][piexif.ImageIFD.Orientation] = 1
130 exif = self.filter_exif(exif, exif_whitelist)
131
132 icc_profile = _im.info.get('icc_profile') if preserve_icc_profiles else None
133
134 for dst, max_size in zip(dst_paths, max_sizes):
135 if is_animated: # Animated gif, leave as-is
136 utils.copy_file(src, dst)
137 continue
138
139 im = _im.copy()
140
141 size = w, h = im.size
142 if w > max_size or h > max_size:
143 size = max_size, max_size
144 # Panoramas get larger thumbnails because they look *awful*
145 if bigger_panoramas and w > 2 * h:
146 size = min(w, max_size * 4), min(w, max_size * 4)
147 try:
148 im.thumbnail(size, Image.ANTIALIAS)
149 if exif is not None and preserve_exif_data:
150 # Put right size in EXIF data
151 w, h = im.size
152 if '0th' in exif:
153 exif["0th"][piexif.ImageIFD.ImageWidth] = w
154 exif["0th"][piexif.ImageIFD.ImageLength] = h
155 if 'Exif' in exif:
156 exif["Exif"][piexif.ExifIFD.PixelXDimension] = w
157 exif["Exif"][piexif.ExifIFD.PixelYDimension] = h
158 # Filter EXIF data as required
159 im.save(dst, exif=piexif.dump(exif), icc_profile=icc_profile)
160 else:
161 im.save(dst, icc_profile=icc_profile)
162 except Exception as e:
163 self.logger.warning("Can't process {0}, using original "
164 "image! ({1})".format(src, e))
165 utils.copy_file(src, dst)
166
167 def resize_svg(self, src, dst_paths, max_sizes, bigger_panoramas):
168 """Make a copy of an svg at the requested sizes."""
169 # Resize svg based on viewport hacking.
170 # note that this can also lead to enlarged svgs
171 if src.endswith('.svgz'):
172 with gzip.GzipFile(src, 'rb') as op:
173 xml = op.read()
174 else:
175 with open(src, 'rb') as op:
176 xml = op.read()
177
178 for dst, max_size in zip(dst_paths, max_sizes):
179 try:
180 tree = lxml.etree.XML(xml)
181 width = tree.attrib['width']
182 height = tree.attrib['height']
183 w = int(re.search("[0-9]+", width).group(0))
184 h = int(re.search("[0-9]+", height).group(0))
185 # calculate new size preserving aspect ratio.
186 ratio = float(w) / h
187 # Panoramas get larger thumbnails because they look *awful*
188 if bigger_panoramas and w > 2 * h:
189 max_size = max_size * 4
190 if w > h:
191 w = max_size
192 h = max_size / ratio
193 else:
194 w = max_size * ratio
195 h = max_size
196 w = int(w)
197 h = int(h)
198 tree.attrib.pop("width")
199 tree.attrib.pop("height")
200 tree.attrib['viewport'] = "0 0 %ipx %ipx" % (w, h)
201 if dst.endswith('.svgz'):
202 op = gzip.GzipFile(dst, 'wb')
203 else:
204 op = open(dst, 'wb')
205 op.write(lxml.etree.tostring(tree))
206 op.close()
207 except (KeyError, AttributeError) as e:
208 self.logger.warning("No width/height in %s. Original exception: %s" % (src, e))
209 utils.copy_file(src, dst)
210
211 def image_date(self, src):
212 """Try to figure out the date of the image."""
213 if src not in self.dates:
214 try:
215 im = Image.open(src)
216 exif = im._getexif()
217 except Exception:
218 exif = None
219 if exif is not None:
220 for tag, value in list(exif.items()):
221 decoded = ExifTags.TAGS.get(tag, tag)
222 if decoded in ('DateTimeOriginal', 'DateTimeDigitized'):
223 try:
224 if isinstance(value, tuple):
225 value = value[0]
226 self.dates[src] = datetime.datetime.strptime(
227 value, '%Y:%m:%d %H:%M:%S')
228 break
229 except ValueError: # Invalid EXIF date.
230 pass
231 if src not in self.dates:
232 self.dates[src] = datetime.datetime.fromtimestamp(
233 os.stat(src).st_mtime)
234 return self.dates[src]
235
[end of nikola/image_processing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/image_processing.py b/nikola/image_processing.py
--- a/nikola/image_processing.py
+++ b/nikola/image_processing.py
@@ -43,7 +43,7 @@
class ImageProcessor(object):
"""Apply image operations."""
- image_ext_list_builtin = ['.jpg', '.png', '.jpeg', '.gif', '.svg', '.svgz', '.bmp', '.tiff']
+ image_ext_list_builtin = ['.jpg', '.png', '.jpeg', '.gif', '.svg', '.svgz', '.bmp', '.tiff', '.webp']
def _fill_exif_tag_names(self):
"""Connect EXIF tag names to numeric values."""
@@ -146,6 +146,10 @@
size = min(w, max_size * 4), min(w, max_size * 4)
try:
im.thumbnail(size, Image.ANTIALIAS)
+ save_args = {}
+ if icc_profile:
+ save_args['icc_profile'] = icc_profile
+
if exif is not None and preserve_exif_data:
# Put right size in EXIF data
w, h = im.size
@@ -156,9 +160,9 @@
exif["Exif"][piexif.ExifIFD.PixelXDimension] = w
exif["Exif"][piexif.ExifIFD.PixelYDimension] = h
# Filter EXIF data as required
- im.save(dst, exif=piexif.dump(exif), icc_profile=icc_profile)
- else:
- im.save(dst, icc_profile=icc_profile)
+ save_args['exif'] = piexif.dump(exif)
+
+ im.save(dst, **save_args)
except Exception as e:
self.logger.warning("Can't process {0}, using original "
"image! ({1})".format(src, e))
| {"golden_diff": "diff --git a/nikola/image_processing.py b/nikola/image_processing.py\n--- a/nikola/image_processing.py\n+++ b/nikola/image_processing.py\n@@ -43,7 +43,7 @@\n class ImageProcessor(object):\n \"\"\"Apply image operations.\"\"\"\n \n- image_ext_list_builtin = ['.jpg', '.png', '.jpeg', '.gif', '.svg', '.svgz', '.bmp', '.tiff']\n+ image_ext_list_builtin = ['.jpg', '.png', '.jpeg', '.gif', '.svg', '.svgz', '.bmp', '.tiff', '.webp']\n \n def _fill_exif_tag_names(self):\n \"\"\"Connect EXIF tag names to numeric values.\"\"\"\n@@ -146,6 +146,10 @@\n size = min(w, max_size * 4), min(w, max_size * 4)\n try:\n im.thumbnail(size, Image.ANTIALIAS)\n+ save_args = {}\n+ if icc_profile:\n+ save_args['icc_profile'] = icc_profile\n+\n if exif is not None and preserve_exif_data:\n # Put right size in EXIF data\n w, h = im.size\n@@ -156,9 +160,9 @@\n exif[\"Exif\"][piexif.ExifIFD.PixelXDimension] = w\n exif[\"Exif\"][piexif.ExifIFD.PixelYDimension] = h\n # Filter EXIF data as required\n- im.save(dst, exif=piexif.dump(exif), icc_profile=icc_profile)\n- else:\n- im.save(dst, icc_profile=icc_profile)\n+ save_args['exif'] = piexif.dump(exif)\n+\n+ im.save(dst, **save_args)\n except Exception as e:\n self.logger.warning(\"Can't process {0}, using original \"\n \"image! ({1})\".format(src, e))\n", "issue": "How could Nikola be able to process WebP format image?\nI would like to use webp picture in whole site, but when I built, it reported as:\r\n\r\n```\r\n. scale_images:output\\images\\gm-blur-shown-grid.webp\r\n[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\\gm-blur-shown-grid.webp, using original image! (a bytes-like object is required, not 'NoneType')\r\n[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\\gm-blur-shown-grid.webp, using original image! (a bytes-like object is required, not 'NoneType')\r\n. scale_images:output\\images\\gm-blur-shown.webp\r\n[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\\gm-blur-shown.webp, using original image! (a bytes-like object is required, not 'NoneType')\r\n[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\\gm-blur-shown.webp, using original image! (a bytes-like object is required, not 'NoneType')\r\n```\r\n\r\nThere were raw webp pictures in my `output` directory, but there won't generate thumbnails.\r\n\r\nAnd then, I search in conf.py, Google and GitHub issue, but didn't find any description to enable this feature.\r\nIs there any way to use webp in nikola site?\nHow could Nikola be able to process WebP format image?\nI would like to use webp picture in whole site, but when I built, it reported as:\r\n\r\n```\r\n. scale_images:output\\images\\gm-blur-shown-grid.webp\r\n[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\\gm-blur-shown-grid.webp, using original image! (a bytes-like object is required, not 'NoneType')\r\n[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\\gm-blur-shown-grid.webp, using original image! (a bytes-like object is required, not 'NoneType')\r\n. scale_images:output\\images\\gm-blur-shown.webp\r\n[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\\gm-blur-shown.webp, using original image! (a bytes-like object is required, not 'NoneType')\r\n[2020-04-27T18:28:19Z] WARNING: scale_images: Can't process images\\gm-blur-shown.webp, using original image! 
(a bytes-like object is required, not 'NoneType')\r\n```\r\n\r\nThere were raw webp pictures in my `output` directory, but there won't generate thumbnails.\r\n\r\nAnd then, I search in conf.py, Google and GitHub issue, but didn't find any description to enable this feature.\r\nIs there any way to use webp in nikola site?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Process images.\"\"\"\n\nimport datetime\nimport gzip\nimport os\nimport re\n\nimport lxml\nimport piexif\nfrom PIL import ExifTags, Image\n\nfrom nikola import utils\n\nEXIF_TAG_NAMES = {}\n\n\nclass ImageProcessor(object):\n \"\"\"Apply image operations.\"\"\"\n\n image_ext_list_builtin = ['.jpg', '.png', '.jpeg', '.gif', '.svg', '.svgz', '.bmp', '.tiff']\n\n def _fill_exif_tag_names(self):\n \"\"\"Connect EXIF tag names to numeric values.\"\"\"\n if not EXIF_TAG_NAMES:\n for ifd in piexif.TAGS:\n for tag, data in piexif.TAGS[ifd].items():\n EXIF_TAG_NAMES[tag] = data['name']\n\n def filter_exif(self, exif, whitelist):\n \"\"\"Filter EXIF data as described in the documentation.\"\"\"\n # Scenario 1: keep everything\n if whitelist == {'*': '*'}:\n return exif\n\n # Scenario 2: keep nothing\n if whitelist == {}:\n return None\n\n # Scenario 3: keep some\n self._fill_exif_tag_names()\n exif = exif.copy() # Don't modify in-place, it's rude\n for k in list(exif.keys()):\n if type(exif[k]) != dict:\n pass # At least thumbnails have no fields\n elif k not in whitelist:\n exif.pop(k) # Not whitelisted, remove\n elif k in whitelist and whitelist[k] == '*':\n # Fully whitelisted, keep all\n pass\n else:\n # Partially whitelisted\n for tag in list(exif[k].keys()):\n if EXIF_TAG_NAMES[tag] not in whitelist[k]:\n exif[k].pop(tag)\n\n return exif or None\n\n def resize_image(self, src, dst=None, max_size=None, bigger_panoramas=True, preserve_exif_data=False, exif_whitelist={}, preserve_icc_profiles=False, dst_paths=None, max_sizes=None):\n \"\"\"Make a copy of the image in the requested size(s).\n\n max_sizes should be a list of sizes, and the image would be resized to fit in a\n square of each size (preserving aspect ratio).\n\n dst_paths is a list of the destination paths, and should be the same length as max_sizes.\n\n Backwards compatibility:\n\n * If max_sizes is None, it's set to [max_size]\n * If dst_paths is None, it's set to [dst]\n * Either max_size or max_sizes should be set\n * Either 
dst or dst_paths should be set\n \"\"\"\n if dst_paths is None:\n dst_paths = [dst]\n if max_sizes is None:\n max_sizes = [max_size]\n if len(max_sizes) != len(dst_paths):\n raise ValueError('resize_image called with incompatible arguments: {} / {}'.format(dst_paths, max_sizes))\n extension = os.path.splitext(src)[1].lower()\n if extension in {'.svg', '.svgz'}:\n self.resize_svg(src, dst_paths, max_sizes, bigger_panoramas)\n return\n\n _im = Image.open(src)\n\n # The jpg exclusion is Issue #3332\n is_animated = hasattr(_im, 'n_frames') and _im.n_frames > 1 and extension not in {'.jpg', '.jpeg'}\n\n exif = None\n if \"exif\" in _im.info:\n exif = piexif.load(_im.info[\"exif\"])\n # Rotate according to EXIF\n if \"0th\" in exif:\n value = exif['0th'].get(piexif.ImageIFD.Orientation, 1)\n if value in (3, 4):\n _im = _im.transpose(Image.ROTATE_180)\n elif value in (5, 6):\n _im = _im.transpose(Image.ROTATE_270)\n elif value in (7, 8):\n _im = _im.transpose(Image.ROTATE_90)\n if value in (2, 4, 5, 7):\n _im = _im.transpose(Image.FLIP_LEFT_RIGHT)\n exif['0th'][piexif.ImageIFD.Orientation] = 1\n exif = self.filter_exif(exif, exif_whitelist)\n\n icc_profile = _im.info.get('icc_profile') if preserve_icc_profiles else None\n\n for dst, max_size in zip(dst_paths, max_sizes):\n if is_animated: # Animated gif, leave as-is\n utils.copy_file(src, dst)\n continue\n\n im = _im.copy()\n\n size = w, h = im.size\n if w > max_size or h > max_size:\n size = max_size, max_size\n # Panoramas get larger thumbnails because they look *awful*\n if bigger_panoramas and w > 2 * h:\n size = min(w, max_size * 4), min(w, max_size * 4)\n try:\n im.thumbnail(size, Image.ANTIALIAS)\n if exif is not None and preserve_exif_data:\n # Put right size in EXIF data\n w, h = im.size\n if '0th' in exif:\n exif[\"0th\"][piexif.ImageIFD.ImageWidth] = w\n exif[\"0th\"][piexif.ImageIFD.ImageLength] = h\n if 'Exif' in exif:\n exif[\"Exif\"][piexif.ExifIFD.PixelXDimension] = w\n exif[\"Exif\"][piexif.ExifIFD.PixelYDimension] = h\n # Filter EXIF data as required\n im.save(dst, exif=piexif.dump(exif), icc_profile=icc_profile)\n else:\n im.save(dst, icc_profile=icc_profile)\n except Exception as e:\n self.logger.warning(\"Can't process {0}, using original \"\n \"image! 
({1})\".format(src, e))\n utils.copy_file(src, dst)\n\n def resize_svg(self, src, dst_paths, max_sizes, bigger_panoramas):\n \"\"\"Make a copy of an svg at the requested sizes.\"\"\"\n # Resize svg based on viewport hacking.\n # note that this can also lead to enlarged svgs\n if src.endswith('.svgz'):\n with gzip.GzipFile(src, 'rb') as op:\n xml = op.read()\n else:\n with open(src, 'rb') as op:\n xml = op.read()\n\n for dst, max_size in zip(dst_paths, max_sizes):\n try:\n tree = lxml.etree.XML(xml)\n width = tree.attrib['width']\n height = tree.attrib['height']\n w = int(re.search(\"[0-9]+\", width).group(0))\n h = int(re.search(\"[0-9]+\", height).group(0))\n # calculate new size preserving aspect ratio.\n ratio = float(w) / h\n # Panoramas get larger thumbnails because they look *awful*\n if bigger_panoramas and w > 2 * h:\n max_size = max_size * 4\n if w > h:\n w = max_size\n h = max_size / ratio\n else:\n w = max_size * ratio\n h = max_size\n w = int(w)\n h = int(h)\n tree.attrib.pop(\"width\")\n tree.attrib.pop(\"height\")\n tree.attrib['viewport'] = \"0 0 %ipx %ipx\" % (w, h)\n if dst.endswith('.svgz'):\n op = gzip.GzipFile(dst, 'wb')\n else:\n op = open(dst, 'wb')\n op.write(lxml.etree.tostring(tree))\n op.close()\n except (KeyError, AttributeError) as e:\n self.logger.warning(\"No width/height in %s. Original exception: %s\" % (src, e))\n utils.copy_file(src, dst)\n\n def image_date(self, src):\n \"\"\"Try to figure out the date of the image.\"\"\"\n if src not in self.dates:\n try:\n im = Image.open(src)\n exif = im._getexif()\n except Exception:\n exif = None\n if exif is not None:\n for tag, value in list(exif.items()):\n decoded = ExifTags.TAGS.get(tag, tag)\n if decoded in ('DateTimeOriginal', 'DateTimeDigitized'):\n try:\n if isinstance(value, tuple):\n value = value[0]\n self.dates[src] = datetime.datetime.strptime(\n value, '%Y:%m:%d %H:%M:%S')\n break\n except ValueError: # Invalid EXIF date.\n pass\n if src not in self.dates:\n self.dates[src] = datetime.datetime.fromtimestamp(\n os.stat(src).st_mtime)\n return self.dates[src]\n", "path": "nikola/image_processing.py"}]} | 4,064 | 425 |
gh_patches_debug_13244 | rasdani/github-patches | git_diff | googleapis__python-bigquery-1542 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
make `google-cloud-bigquery-storage>=2.6.0` the minimum version
Version 2.6.0 makes `read_session` an optional argument to `rows()` (used in the pandas connector).
BLOCKED: wait for a few versions of `google-cloud-bigquery-storage` before bumping the minimum.
(TODO: I still want to figure out how long it takes people to upgrade naturally, so that we can set some data-driven policies around dependency upgrades.)
</issue>
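For illustration only — not code from this repository — here is one way a runtime floor like this could be checked, using `importlib.metadata` (Python 3.8+) and the `packaging` dependency already listed in `setup.py`; the function name is made up:

```python
from importlib import metadata

from packaging import version

_MIN_BQSTORAGE = version.parse("2.6.0")  # the floor proposed in this issue


def bqstorage_meets_minimum() -> bool:
    """True if google-cloud-bigquery-storage >= 2.6.0 is installed."""
    try:
        installed = metadata.version("google-cloud-bigquery-storage")
    except metadata.PackageNotFoundError:
        return False  # the optional bqstorage extra is not installed at all
    return version.parse(installed) >= _MIN_BQSTORAGE


print(bqstorage_meets_minimum())
```

The change in the golden diff itself is just the one-line bump of the `bqstorage` extra's lower bound in `setup.py`.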
<code>
[start of setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "grpcio >= 1.47.0, < 2.0dev", # https://github.com/googleapis/python-bigquery/issues/1262
33 "grpcio >= 1.49.1, < 2.0dev; python_version>='3.11'",
34 # NOTE: Maintainers, please do not require google-api-core>=2.x.x
35 # Until this issue is closed
36 # https://github.com/googleapis/google-cloud-python/issues/10566
37 "google-api-core[grpc] >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0",
38 "proto-plus >= 1.15.0, <2.0.0dev",
39 # NOTE: Maintainers, please do not require google-cloud-core>=2.x.x
40 # Until this issue is closed
41 # https://github.com/googleapis/google-cloud-python/issues/10566
42 "google-cloud-core >= 1.6.0, <3.0.0dev",
43 "google-resumable-media >= 0.6.0, < 3.0dev",
44 "packaging >= 20.0.0",
45 "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", # For the legacy proto-based types.
46 "python-dateutil >= 2.7.2, <3.0dev",
47 "requests >= 2.21.0, < 3.0.0dev",
48 ]
49 pyarrow_dependency = "pyarrow >= 3.0.0"
50 extras = {
51 # Keep the no-op bqstorage extra for backward compatibility.
52 # See: https://github.com/googleapis/python-bigquery/issues/757
53 "bqstorage": [
54 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
55 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
56 # installed, even though `google-cloud-bigquery-storage` specifies it
57 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
58 # See: https://github.com/googleapis/python-bigquery/issues/83 The
59 # grpc.Channel.close() method isn't added until 1.32.0.
60 # https://github.com/grpc/grpc/pull/15254
61 "grpcio >= 1.47.0, < 2.0dev",
62 "grpcio >= 1.49.1, < 2.0dev; python_version>='3.11'",
63 pyarrow_dependency,
64 ],
65 "pandas": [
66 "pandas>=1.1.0",
67 pyarrow_dependency,
68 "db-dtypes>=0.3.0,<2.0.0dev",
69 ],
70 "ipywidgets": [
71 "ipywidgets>=7.7.0",
72 "ipykernel>=6.0.0",
73 ],
74 "geopandas": ["geopandas>=0.9.0, <1.0dev", "Shapely>=1.8.4, <2.0dev"],
75 "ipython": [
76 "ipython>=7.23.1,!=8.1.0",
77 "ipykernel>=6.0.0",
78 ],
79 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
80 "opentelemetry": [
81 "opentelemetry-api >= 1.1.0",
82 "opentelemetry-sdk >= 1.1.0",
83 "opentelemetry-instrumentation >= 0.20b0",
84 ],
85 }
86
87 all_extras = []
88
89 for extra in extras:
90 all_extras.extend(extras[extra])
91
92 extras["all"] = all_extras
93
94 # Setup boilerplate below this line.
95
96 package_root = os.path.abspath(os.path.dirname(__file__))
97
98 readme_filename = os.path.join(package_root, "README.rst")
99 with io.open(readme_filename, encoding="utf-8") as readme_file:
100 readme = readme_file.read()
101
102 version = {}
103 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
104 exec(fp.read(), version)
105 version = version["__version__"]
106
107 # Only include packages under the 'google' namespace. Do not include tests,
108 # benchmarks, etc.
109 packages = [
110 package
111 for package in setuptools.PEP420PackageFinder.find()
112 if package.startswith("google")
113 ]
114
115 # Determine which namespaces are needed.
116 namespaces = ["google"]
117 if "google.cloud" in packages:
118 namespaces.append("google.cloud")
119
120
121 setuptools.setup(
122 name=name,
123 version=version,
124 description=description,
125 long_description=readme,
126 author="Google LLC",
127 author_email="[email protected]",
128 license="Apache 2.0",
129 url="https://github.com/googleapis/python-bigquery",
130 classifiers=[
131 release_status,
132 "Intended Audience :: Developers",
133 "License :: OSI Approved :: Apache Software License",
134 "Programming Language :: Python",
135 "Programming Language :: Python :: 3",
136 "Programming Language :: Python :: 3.7",
137 "Programming Language :: Python :: 3.8",
138 "Programming Language :: Python :: 3.9",
139 "Programming Language :: Python :: 3.10",
140 "Programming Language :: Python :: 3.11",
141 "Operating System :: OS Independent",
142 "Topic :: Internet",
143 ],
144 platforms="Posix; MacOS X; Windows",
145 packages=packages,
146 namespace_packages=namespaces,
147 install_requires=dependencies,
148 extras_require=extras,
149 python_requires=">=3.7",
150 include_package_data=True,
151 zip_safe=False,
152 )
153
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -51,7 +51,7 @@
# Keep the no-op bqstorage extra for backward compatibility.
# See: https://github.com/googleapis/python-bigquery/issues/757
"bqstorage": [
- "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
+ "google-cloud-bigquery-storage >= 2.6.0, <3.0.0dev",
# Due to an issue in pip's dependency resolver, the `grpc` extra is not
# installed, even though `google-cloud-bigquery-storage` specifies it
# as `google-api-core[grpc]`. We thus need to explicitly specify it here.
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -51,7 +51,7 @@\n # Keep the no-op bqstorage extra for backward compatibility.\n # See: https://github.com/googleapis/python-bigquery/issues/757\n \"bqstorage\": [\n- \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n+ \"google-cloud-bigquery-storage >= 2.6.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n", "issue": "make `google-cloud-bigquery-storage>=2.6.0` the minimum version\n2.6.0 makes `read_session` optional to `rows()` (used in pandas connector).\r\n\r\nBLOCKED: wait for a few versions of `google-cloud-bigquery-storage` before bumping the minimum.\r\n\r\n(TODO: I still want to figure out how long it takes people to upgrade naturally so that we can set some data-driven policies around deps upgrades)\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"grpcio >= 1.47.0, < 2.0dev\", # https://github.com/googleapis/python-bigquery/issues/1262\n \"grpcio >= 1.49.1, < 2.0dev; python_version>='3.11'\",\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core[grpc] >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0\",\n \"proto-plus >= 1.15.0, <2.0.0dev\",\n # NOTE: Maintainers, please do not require google-cloud-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-cloud-core >= 1.6.0, <3.0.0dev\",\n \"google-resumable-media >= 0.6.0, < 3.0dev\",\n \"packaging >= 20.0.0\",\n \"protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5\", # For the legacy proto-based types.\n \"python-dateutil >= 2.7.2, <3.0dev\",\n \"requests >= 2.21.0, < 3.0.0dev\",\n]\npyarrow_dependency = \"pyarrow >= 3.0.0\"\nextras = {\n # Keep the no-op bqstorage extra for backward compatibility.\n # See: https://github.com/googleapis/python-bigquery/issues/757\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.47.0, < 2.0dev\",\n \"grpcio >= 1.49.1, < 2.0dev; python_version>='3.11'\",\n pyarrow_dependency,\n ],\n \"pandas\": [\n \"pandas>=1.1.0\",\n pyarrow_dependency,\n \"db-dtypes>=0.3.0,<2.0.0dev\",\n ],\n \"ipywidgets\": [\n \"ipywidgets>=7.7.0\",\n \"ipykernel>=6.0.0\",\n ],\n \"geopandas\": [\"geopandas>=0.9.0, <1.0dev\", \"Shapely>=1.8.4, <2.0dev\"],\n \"ipython\": [\n \"ipython>=7.23.1,!=8.1.0\",\n \"ipykernel>=6.0.0\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 1.1.0\",\n \"opentelemetry-sdk >= 1.1.0\",\n \"opentelemetry-instrumentation >= 0.20b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.7\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 2,538 | 175 |
gh_patches_debug_20484 | rasdani/github-patches | git_diff | conda__conda-5231 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda remove --all behavior changed in 4.2.7
`conda remove --all` used to remove an environment if it existed, and to quietly succeed if it was already gone. Now this command fails when the environment does not exist. This makes something that was formerly easy to automate now awkward, especially when the automation must be cross-platform and run on Windows.
```
C:\>conda remove --all -n nonenv
CondaEnvironmentNotFoundError: Could not find environment: nonenv .
You can list all discoverable environments with `conda info --envs`.
C:\>echo %ERRORLEVEL%
1
```
Simply suppressing the error is not appealing, because there are many other failure modes, distinct from the environment not existing, that should still cause the command to fail.
</issue>
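As a stop-gap for scripts hit by this change — a workaround sketch, not the fix — the environment's existence can be checked first so the removal stays idempotent. This assumes `conda` is on `PATH` and uses `conda env list --json`; matching environments by the path's basename is a simplification for the sketch:

```python
import json
import os
import subprocess


def remove_env_if_present(name: str) -> None:
    """Remove a named conda environment, quietly doing nothing if it is absent."""
    out = subprocess.check_output(["conda", "env", "list", "--json"])
    env_paths = json.loads(out).get("envs", [])
    if not any(os.path.basename(path) == name for path in env_paths):
        return  # mimic the pre-4.2.7 behaviour: already gone counts as success
    subprocess.check_call(["conda", "remove", "--all", "--yes", "--name", name])


remove_env_if_present("nonenv")
```

Real failures (permission problems, partially removed packages, and so on) still surface through `check_call`, which is exactly the property the issue says plain error suppression would lose.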
<code>
[start of conda/cli/main_remove.py]
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6
7 from __future__ import absolute_import, division, print_function, unicode_literals
8
9 from argparse import RawDescriptionHelpFormatter
10 from collections import defaultdict
11 import logging
12 from os.path import abspath, join
13 import sys
14
15 from .conda_argparse import (add_parser_channels, add_parser_help, add_parser_json,
16 add_parser_no_pin, add_parser_no_use_index_cache, add_parser_offline,
17 add_parser_prefix, add_parser_pscheck, add_parser_quiet,
18 add_parser_use_index_cache, add_parser_use_local, add_parser_yes)
19
20 try:
21 from cytoolz.itertoolz import groupby
22 except ImportError:
23 from .._vendor.toolz.itertoolz import groupby
24
25 help = "%s a list of packages from a specified conda environment."
26 descr = help + """
27
28 This command will also remove any package that depends on any of the
29 specified packages as well---unless a replacement can be found without
30 that dependency. If you wish to skip this dependency checking and remove
31 just the requested packages, add the '--force' option. Note however that
32 this may result in a broken environment, so use this with caution.
33 """
34 example = """
35 Examples:
36
37 conda %s -n myenv scipy
38
39 """
40
41 uninstall_help = "Alias for conda remove. See conda remove --help."
42 log = logging.getLogger(__name__)
43
44
45 def configure_parser(sub_parsers, name='remove'):
46 if name == 'remove':
47 p = sub_parsers.add_parser(
48 name,
49 formatter_class=RawDescriptionHelpFormatter,
50 description=descr % name.capitalize(),
51 help=help % name.capitalize(),
52 epilog=example % name,
53 add_help=False,
54 )
55 else:
56 p = sub_parsers.add_parser(
57 name,
58 formatter_class=RawDescriptionHelpFormatter,
59 description=uninstall_help,
60 help=uninstall_help,
61 epilog=example % name,
62 add_help=False,
63 )
64 add_parser_help(p)
65 add_parser_yes(p)
66 add_parser_json(p)
67 p.add_argument(
68 "--all",
69 action="store_true",
70 help="%s all packages, i.e., the entire environment." % name.capitalize(),
71 )
72 p.add_argument(
73 "--features",
74 action="store_true",
75 help="%s features (instead of packages)." % name.capitalize(),
76 )
77 p.add_argument(
78 "--force",
79 action="store_true",
80 help="Forces removal of a package without removing packages that depend on it. "
81 "Using this option will usually leave your environment in a broken and "
82 "inconsistent state.",
83 )
84 add_parser_no_pin(p)
85 add_parser_channels(p)
86 add_parser_prefix(p)
87 add_parser_quiet(p)
88 # Putting this one first makes it the default
89 add_parser_no_use_index_cache(p)
90 add_parser_use_index_cache(p)
91 add_parser_use_local(p)
92 add_parser_offline(p)
93 add_parser_pscheck(p)
94 p.add_argument(
95 'package_names',
96 metavar='package_name',
97 action="store",
98 nargs='*',
99 help="Package names to %s from the environment." % name,
100 )
101 p.set_defaults(func=execute)
102
103
104 def execute(args, parser):
105 from .common import (confirm_yn, ensure_override_channels_requires_channel, ensure_use_local,
106 names_in_specs, specs_from_args, stdout_json)
107 from ..base.constants import ROOT_NO_RM
108 from ..base.context import context
109 from ..common.compat import iteritems, iterkeys
110 from ..common.path import is_private_env_path
111 from ..console import json_progress_bars
112 from ..core.index import get_index
113 from ..exceptions import CondaEnvironmentError, CondaValueError, PackageNotFoundError
114 from ..gateways.disk.delete import delete_trash
115 from ..plan import (get_blank_actions)
116 from ..core.solve import get_resolve_object
117 from ..core.solve import solve_for_actions
118 from ..resolve import MatchSpec
119 from ..core.linked_data import linked_data
120 from ..gateways.disk.delete import rm_rf
121 from ..instructions import PREFIX
122 from ..plan import (add_unlink, display_actions, execute_actions,
123 nothing_to_do, remove_actions)
124
125 if not (args.all or args.package_names):
126 raise CondaValueError('no package names supplied,\n'
127 ' try "conda remove -h" for more details')
128
129 prefix = context.prefix_w_legacy_search
130 if args.all and prefix == context.default_prefix:
131 msg = "cannot remove current environment. deactivate and run conda remove again"
132 raise CondaEnvironmentError(msg)
133 ensure_use_local(args)
134 ensure_override_channels_requires_channel(args)
135 if not args.features and args.all:
136 index = linked_data(prefix)
137 index = {dist: info for dist, info in iteritems(index)}
138 else:
139 index = get_index(channel_urls=context.channels,
140 prepend=not args.override_channels,
141 use_local=args.use_local,
142 use_cache=args.use_index_cache,
143 prefix=prefix)
144 specs = None
145 if args.features:
146 specs = [MatchSpec(track_features=f) for f in set(args.package_names)]
147 actions = remove_actions(prefix, specs, index, pinned=context.respect_pinned)
148 actions['ACTION'] = 'REMOVE_FEATURE'
149 action_groups = (actions, index),
150 elif args.all:
151 if prefix == context.root_prefix:
152 raise CondaEnvironmentError('cannot remove root environment,\n'
153 ' add -n NAME or -p PREFIX option')
154 actions = defaultdict(list)
155 actions[PREFIX] = prefix
156 for dist in sorted(iterkeys(index)):
157 add_unlink(actions, dist)
158 actions['ACTION'] = 'REMOVE_ALL'
159 action_groups = (actions, index),
160 elif prefix == context.root_prefix and not context.prefix_specified:
161 from ..core.envs_manager import EnvsDirectory
162 ed = EnvsDirectory(join(context.root_prefix, 'envs'))
163 get_env = lambda s: ed.get_registered_preferred_env(MatchSpec(s).name)
164 specs = specs_from_args(args.package_names)
165 env_spec_map = groupby(get_env, specs)
166 action_groups = []
167 for env_name, spcs in iteritems(env_spec_map):
168 pfx = ed.to_prefix(env_name)
169 r = get_resolve_object(index.copy(), pfx)
170 specs_to_remove = tuple(MatchSpec(s) for s in spcs)
171 prune = pfx != context.root_prefix
172 dists_for_unlinking, dists_for_linking = solve_for_actions(
173 pfx, r,
174 specs_to_remove=specs_to_remove, prune=prune,
175 )
176 actions = get_blank_actions(pfx)
177 actions['UNLINK'].extend(dists_for_unlinking)
178 actions['LINK'].extend(dists_for_linking)
179 actions['SPECS'].extend(s.spec for s in specs_to_remove)
180 actions['ACTION'] = 'REMOVE'
181 action_groups.append((actions, r.index))
182 action_groups = tuple(action_groups)
183 else:
184 specs = specs_from_args(args.package_names)
185 if sys.prefix == abspath(prefix) and names_in_specs(ROOT_NO_RM, specs) and not args.force:
186 raise CondaEnvironmentError('cannot remove %s from root environment' %
187 ', '.join(ROOT_NO_RM))
188 action_groups = (remove_actions(prefix, list(specs), index=index, force=args.force,
189 pinned=context.respect_pinned), index),
190
191 delete_trash()
192 if any(nothing_to_do(x[0]) for x in action_groups):
193 if args.all:
194 print("\nRemove all packages in environment %s:\n" % prefix, file=sys.stderr)
195 if not context.json:
196 confirm_yn(args)
197 rm_rf(prefix)
198
199 if context.json:
200 stdout_json({
201 'success': True,
202 'actions': tuple(x[0] for x in action_groups)
203 })
204 return
205
206 pkg = str(args.package_names).replace("['", "")
207 pkg = pkg.replace("']", "")
208
209 error_message = "No packages named '%s' found to remove from environment." % pkg
210 raise PackageNotFoundError(error_message)
211 if not context.json:
212 for actions, ndx in action_groups:
213 print()
214 print("Package plan for package removal in environment %s:" % actions["PREFIX"])
215 display_actions(actions, ndx)
216 elif context.json and args.dry_run:
217 stdout_json({
218 'success': True,
219 'dry_run': True,
220 'actions': tuple(x[0] for x in action_groups),
221 })
222 return
223
224 if not context.json:
225 confirm_yn(args)
226
227 for actions, ndx in action_groups:
228 if context.json and not context.quiet:
229 with json_progress_bars():
230 execute_actions(actions, ndx, verbose=not context.quiet)
231 else:
232 execute_actions(actions, ndx, verbose=not context.quiet)
233
234 target_prefix = actions["PREFIX"]
235 if is_private_env_path(target_prefix) and linked_data(target_prefix) == {}:
236 rm_rf(target_prefix)
237
238 if args.all:
239 rm_rf(prefix)
240
241 if context.json:
242 stdout_json({
243 'success': True,
244 'actions': tuple(x[0] for x in action_groups),
245 })
246
[end of conda/cli/main_remove.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda/cli/main_remove.py b/conda/cli/main_remove.py
--- a/conda/cli/main_remove.py
+++ b/conda/cli/main_remove.py
@@ -9,7 +9,7 @@
from argparse import RawDescriptionHelpFormatter
from collections import defaultdict
import logging
-from os.path import abspath, join
+from os.path import abspath, join, isdir
import sys
from .conda_argparse import (add_parser_channels, add_parser_help, add_parser_json,
@@ -130,6 +130,10 @@
if args.all and prefix == context.default_prefix:
msg = "cannot remove current environment. deactivate and run conda remove again"
raise CondaEnvironmentError(msg)
+ if args.all and not isdir(prefix):
+ # full environment removal was requested, but environment doesn't exist anyway
+ return 0
+
ensure_use_local(args)
ensure_override_channels_requires_channel(args)
if not args.features and args.all:
| {"golden_diff": "diff --git a/conda/cli/main_remove.py b/conda/cli/main_remove.py\n--- a/conda/cli/main_remove.py\n+++ b/conda/cli/main_remove.py\n@@ -9,7 +9,7 @@\n from argparse import RawDescriptionHelpFormatter\n from collections import defaultdict\n import logging\n-from os.path import abspath, join\n+from os.path import abspath, join, isdir\n import sys\n \n from .conda_argparse import (add_parser_channels, add_parser_help, add_parser_json,\n@@ -130,6 +130,10 @@\n if args.all and prefix == context.default_prefix:\n msg = \"cannot remove current environment. deactivate and run conda remove again\"\n raise CondaEnvironmentError(msg)\n+ if args.all and not isdir(prefix):\n+ # full environment removal was requested, but environment doesn't exist anyway\n+ return 0\n+\n ensure_use_local(args)\n ensure_override_channels_requires_channel(args)\n if not args.features and args.all:\n", "issue": "conda remove --all behavior changed in 4.2.7\n`conda remove --all` used to remove an environment if it existed, and quietly succeed if it was already gone. Now this command fails when the environment does not exist. This makes something formerly easy to do in an automated way now difficult especially in a cross-platform way on Windows.\n\n```\nC:\\>conda remove --all -n nonenv\nCondaEnvironmentNotFoundError: Could not find environment: nonenv .\nYou can list all discoverable environments with `conda info --envs`.\n\n\n\nC:\\>echo %ERRORLEVEL%\n1\n```\n\nJust suppressing the error is not appealing, because there are many error modes different from the environment not existing that should trigger a failure.\n\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom argparse import RawDescriptionHelpFormatter\nfrom collections import defaultdict\nimport logging\nfrom os.path import abspath, join\nimport sys\n\nfrom .conda_argparse import (add_parser_channels, add_parser_help, add_parser_json,\n add_parser_no_pin, add_parser_no_use_index_cache, add_parser_offline,\n add_parser_prefix, add_parser_pscheck, add_parser_quiet,\n add_parser_use_index_cache, add_parser_use_local, add_parser_yes)\n\ntry:\n from cytoolz.itertoolz import groupby\nexcept ImportError:\n from .._vendor.toolz.itertoolz import groupby\n\nhelp = \"%s a list of packages from a specified conda environment.\"\ndescr = help + \"\"\"\n\nThis command will also remove any package that depends on any of the\nspecified packages as well---unless a replacement can be found without\nthat dependency. If you wish to skip this dependency checking and remove\njust the requested packages, add the '--force' option. Note however that\nthis may result in a broken environment, so use this with caution.\n\"\"\"\nexample = \"\"\"\nExamples:\n\n conda %s -n myenv scipy\n\n\"\"\"\n\nuninstall_help = \"Alias for conda remove. 
See conda remove --help.\"\nlog = logging.getLogger(__name__)\n\n\ndef configure_parser(sub_parsers, name='remove'):\n if name == 'remove':\n p = sub_parsers.add_parser(\n name,\n formatter_class=RawDescriptionHelpFormatter,\n description=descr % name.capitalize(),\n help=help % name.capitalize(),\n epilog=example % name,\n add_help=False,\n )\n else:\n p = sub_parsers.add_parser(\n name,\n formatter_class=RawDescriptionHelpFormatter,\n description=uninstall_help,\n help=uninstall_help,\n epilog=example % name,\n add_help=False,\n )\n add_parser_help(p)\n add_parser_yes(p)\n add_parser_json(p)\n p.add_argument(\n \"--all\",\n action=\"store_true\",\n help=\"%s all packages, i.e., the entire environment.\" % name.capitalize(),\n )\n p.add_argument(\n \"--features\",\n action=\"store_true\",\n help=\"%s features (instead of packages).\" % name.capitalize(),\n )\n p.add_argument(\n \"--force\",\n action=\"store_true\",\n help=\"Forces removal of a package without removing packages that depend on it. \"\n \"Using this option will usually leave your environment in a broken and \"\n \"inconsistent state.\",\n )\n add_parser_no_pin(p)\n add_parser_channels(p)\n add_parser_prefix(p)\n add_parser_quiet(p)\n # Putting this one first makes it the default\n add_parser_no_use_index_cache(p)\n add_parser_use_index_cache(p)\n add_parser_use_local(p)\n add_parser_offline(p)\n add_parser_pscheck(p)\n p.add_argument(\n 'package_names',\n metavar='package_name',\n action=\"store\",\n nargs='*',\n help=\"Package names to %s from the environment.\" % name,\n )\n p.set_defaults(func=execute)\n\n\ndef execute(args, parser):\n from .common import (confirm_yn, ensure_override_channels_requires_channel, ensure_use_local,\n names_in_specs, specs_from_args, stdout_json)\n from ..base.constants import ROOT_NO_RM\n from ..base.context import context\n from ..common.compat import iteritems, iterkeys\n from ..common.path import is_private_env_path\n from ..console import json_progress_bars\n from ..core.index import get_index\n from ..exceptions import CondaEnvironmentError, CondaValueError, PackageNotFoundError\n from ..gateways.disk.delete import delete_trash\n from ..plan import (get_blank_actions)\n from ..core.solve import get_resolve_object\n from ..core.solve import solve_for_actions\n from ..resolve import MatchSpec\n from ..core.linked_data import linked_data\n from ..gateways.disk.delete import rm_rf\n from ..instructions import PREFIX\n from ..plan import (add_unlink, display_actions, execute_actions,\n nothing_to_do, remove_actions)\n\n if not (args.all or args.package_names):\n raise CondaValueError('no package names supplied,\\n'\n ' try \"conda remove -h\" for more details')\n\n prefix = context.prefix_w_legacy_search\n if args.all and prefix == context.default_prefix:\n msg = \"cannot remove current environment. 
deactivate and run conda remove again\"\n raise CondaEnvironmentError(msg)\n ensure_use_local(args)\n ensure_override_channels_requires_channel(args)\n if not args.features and args.all:\n index = linked_data(prefix)\n index = {dist: info for dist, info in iteritems(index)}\n else:\n index = get_index(channel_urls=context.channels,\n prepend=not args.override_channels,\n use_local=args.use_local,\n use_cache=args.use_index_cache,\n prefix=prefix)\n specs = None\n if args.features:\n specs = [MatchSpec(track_features=f) for f in set(args.package_names)]\n actions = remove_actions(prefix, specs, index, pinned=context.respect_pinned)\n actions['ACTION'] = 'REMOVE_FEATURE'\n action_groups = (actions, index),\n elif args.all:\n if prefix == context.root_prefix:\n raise CondaEnvironmentError('cannot remove root environment,\\n'\n ' add -n NAME or -p PREFIX option')\n actions = defaultdict(list)\n actions[PREFIX] = prefix\n for dist in sorted(iterkeys(index)):\n add_unlink(actions, dist)\n actions['ACTION'] = 'REMOVE_ALL'\n action_groups = (actions, index),\n elif prefix == context.root_prefix and not context.prefix_specified:\n from ..core.envs_manager import EnvsDirectory\n ed = EnvsDirectory(join(context.root_prefix, 'envs'))\n get_env = lambda s: ed.get_registered_preferred_env(MatchSpec(s).name)\n specs = specs_from_args(args.package_names)\n env_spec_map = groupby(get_env, specs)\n action_groups = []\n for env_name, spcs in iteritems(env_spec_map):\n pfx = ed.to_prefix(env_name)\n r = get_resolve_object(index.copy(), pfx)\n specs_to_remove = tuple(MatchSpec(s) for s in spcs)\n prune = pfx != context.root_prefix\n dists_for_unlinking, dists_for_linking = solve_for_actions(\n pfx, r,\n specs_to_remove=specs_to_remove, prune=prune,\n )\n actions = get_blank_actions(pfx)\n actions['UNLINK'].extend(dists_for_unlinking)\n actions['LINK'].extend(dists_for_linking)\n actions['SPECS'].extend(s.spec for s in specs_to_remove)\n actions['ACTION'] = 'REMOVE'\n action_groups.append((actions, r.index))\n action_groups = tuple(action_groups)\n else:\n specs = specs_from_args(args.package_names)\n if sys.prefix == abspath(prefix) and names_in_specs(ROOT_NO_RM, specs) and not args.force:\n raise CondaEnvironmentError('cannot remove %s from root environment' %\n ', '.join(ROOT_NO_RM))\n action_groups = (remove_actions(prefix, list(specs), index=index, force=args.force,\n pinned=context.respect_pinned), index),\n\n delete_trash()\n if any(nothing_to_do(x[0]) for x in action_groups):\n if args.all:\n print(\"\\nRemove all packages in environment %s:\\n\" % prefix, file=sys.stderr)\n if not context.json:\n confirm_yn(args)\n rm_rf(prefix)\n\n if context.json:\n stdout_json({\n 'success': True,\n 'actions': tuple(x[0] for x in action_groups)\n })\n return\n\n pkg = str(args.package_names).replace(\"['\", \"\")\n pkg = pkg.replace(\"']\", \"\")\n\n error_message = \"No packages named '%s' found to remove from environment.\" % pkg\n raise PackageNotFoundError(error_message)\n if not context.json:\n for actions, ndx in action_groups:\n print()\n print(\"Package plan for package removal in environment %s:\" % actions[\"PREFIX\"])\n display_actions(actions, ndx)\n elif context.json and args.dry_run:\n stdout_json({\n 'success': True,\n 'dry_run': True,\n 'actions': tuple(x[0] for x in action_groups),\n })\n return\n\n if not context.json:\n confirm_yn(args)\n\n for actions, ndx in action_groups:\n if context.json and not context.quiet:\n with json_progress_bars():\n execute_actions(actions, ndx, verbose=not 
context.quiet)\n else:\n execute_actions(actions, ndx, verbose=not context.quiet)\n\n target_prefix = actions[\"PREFIX\"]\n if is_private_env_path(target_prefix) and linked_data(target_prefix) == {}:\n rm_rf(target_prefix)\n\n if args.all:\n rm_rf(prefix)\n\n if context.json:\n stdout_json({\n 'success': True,\n 'actions': tuple(x[0] for x in action_groups),\n })\n", "path": "conda/cli/main_remove.py"}]} | 3,369 | 214 |
gh_patches_debug_18768 | rasdani/github-patches | git_diff | deepset-ai__haystack-1901 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
REST API fails when embeddings returned flag is enabled in document store
Good morning Haystack devs. Not sure if this is really a bug or I'm doing something wrong, so feel free to change the type of this issue to something else if it's not a bug.
**Describe the bug**
When the document store is forced to return the embeddings, the REST API request fails because the embedding comes back as a NumPy array (`dtype=float32`) while the Pydantic dataclass `DocumentSerialized` expects a `List[float]`, and I think there is no automatic conversion between the two types.
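A minimal sketch of the mismatch, assuming pydantic v1 semantics and using a made-up `DocumentSketch` model in place of the real `DocumentSerialized` schema:

```
from typing import List

import numpy as np
from pydantic import BaseModel, ValidationError


class DocumentSketch(BaseModel):
    # stand-in for the embedding field of DocumentSerialized
    embedding: List[float]


emb = np.random.rand(5).astype(np.float32)

DocumentSketch(embedding=emb.tolist())  # accepted: plain Python list of floats
try:
    DocumentSketch(embedding=emb)       # the ndarray is not coerced to a list
except ValidationError as err:
    print(err)                          # "value is not a valid list (type=type_error.list)"
```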
**Error message**
```
[2021-11-26 08:48:59 +0000] [10] [ERROR] Exception in ASGI application
haystack-api_1 | Traceback (most recent call last):
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/uvicorn/protocols/http/httptools_impl.py", line 375, in run_asgi
haystack-api_1 | result = await app(self.scope, self.receive, self.send)
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/uvicorn/middleware/proxy_headers.py", line 75, in __call__
haystack-api_1 | return await self.app(scope, receive, send)
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/fastapi/applications.py", line 208, in __call__
haystack-api_1 | await super().__call__(scope, receive, send)
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/starlette/applications.py", line 112, in __call__
haystack-api_1 | await self.middleware_stack(scope, receive, send)
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/starlette/middleware/errors.py", line 181, in __call__
haystack-api_1 | raise exc
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/starlette/middleware/errors.py", line 159, in __call__
haystack-api_1 | await self.app(scope, receive, _send)
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/starlette/middleware/cors.py", line 84, in __call__
haystack-api_1 | await self.app(scope, receive, send)
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/starlette/exceptions.py", line 82, in __call__
haystack-api_1 | raise exc
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/starlette/exceptions.py", line 71, in __call__
haystack-api_1 | await self.app(scope, receive, sender)
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/starlette/routing.py", line 656, in __call__
haystack-api_1 | await route.handle(scope, receive, send)
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/starlette/routing.py", line 259, in handle
haystack-api_1 | await self.app(scope, receive, send)
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/starlette/routing.py", line 61, in app
haystack-api_1 | response = await func(request)
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/fastapi/routing.py", line 243, in app
haystack-api_1 | is_coroutine=is_coroutine,
haystack-api_1 | File "/usr/local/lib/python3.7/dist-packages/fastapi/routing.py", line 137, in serialize_response
haystack-api_1 | raise ValidationError(errors, field.type_)
haystack-api_1 | pydantic.error_wrappers.ValidationError: 5 validation errors for QueryResponse
haystack-api_1 | response -> documents -> 0 -> embedding
haystack-api_1 | value is not a valid list (type=type_error.list)
haystack-api_1 | response -> documents -> 1 -> embedding
haystack-api_1 | value is not a valid list (type=type_error.list)
haystack-api_1 | response -> documents -> 2 -> embedding
haystack-api_1 | value is not a valid list (type=type_error.list)
haystack-api_1 | response -> documents -> 3 -> embedding
haystack-api_1 | value is not a valid list (type=type_error.list)
haystack-api_1 | response -> documents -> 4 -> embedding
haystack-api_1 | value is not a valid list (type=type_error.list)
```
**To Reproduce**
Just start the REST API with `return_embedding: True` set in the document store
**System:**
- Haystack version (commit or version number): 1.0RC1 (latest version from Github)
- DocumentStore: Milvus
- Reader: Farm
- Retriever: DPR
</issue>
<code>
[start of rest_api/controller/search.py]
1 import logging
2 import time
3 import json
4 from pathlib import Path
5
6 from fastapi import APIRouter
7
8 import haystack
9 from haystack.pipelines.base import Pipeline
10 from rest_api.config import PIPELINE_YAML_PATH, QUERY_PIPELINE_NAME
11 from rest_api.config import LOG_LEVEL, CONCURRENT_REQUEST_PER_WORKER
12 from rest_api.schema import QueryRequest, QueryResponse
13 from rest_api.controller.utils import RequestLimiter
14
15
16 logging.getLogger("haystack").setLevel(LOG_LEVEL)
17 logger = logging.getLogger("haystack")
18
19 from pydantic import BaseConfig
20
21 BaseConfig.arbitrary_types_allowed = True
22
23 router = APIRouter()
24
25
26 PIPELINE = Pipeline.load_from_yaml(Path(PIPELINE_YAML_PATH), pipeline_name=QUERY_PIPELINE_NAME)
27 # TODO make this generic for other pipelines with different naming
28 RETRIEVER = PIPELINE.get_node(name="Retriever")
29 DOCUMENT_STORE = RETRIEVER.document_store if RETRIEVER else None
30 logging.info(f"Loaded pipeline nodes: {PIPELINE.graph.nodes.keys()}")
31
32 concurrency_limiter = RequestLimiter(CONCURRENT_REQUEST_PER_WORKER)
33 logging.info("Concurrent requests per worker: {CONCURRENT_REQUEST_PER_WORKER}")
34
35
36 @router.get("/initialized")
37 def check_status():
38 """
39 This endpoint can be used during startup to understand if the
40 server is ready to take any requests, or is still loading.
41
42 The recommended approach is to call this endpoint with a short timeout,
43 like 500ms, and in case of no reply, consider the server busy.
44 """
45 return True
46
47
48 @router.get("/hs_version")
49 def haystack_version():
50 return {"hs_version": haystack.__version__}
51
52
53 @router.post("/query", response_model=QueryResponse, response_model_exclude_none=True)
54 def query(request: QueryRequest):
55 with concurrency_limiter.run():
56 result = _process_request(PIPELINE, request)
57 return result
58
59
60 def _process_request(pipeline, request) -> QueryResponse:
61 start_time = time.time()
62
63 params = request.params or {}
64
65 # format global, top-level filters (e.g. "params": {"filters": {"name": ["some"]}})
66 if "filters" in params.keys():
67 params["filters"] = _format_filters(params["filters"])
68
69 # format targeted node filters (e.g. "params": {"Retriever": {"filters": {"value"}}})
70 for key, value in params.items():
71 if "filters" in params[key].keys():
72 params[key]["filters"] = _format_filters(params[key]["filters"])
73
74 result = pipeline.run(query=request.query, params=params,debug=request.debug)
75 end_time = time.time()
76 logger.info(json.dumps({"request": request, "response": result, "time": f"{(end_time - start_time):.2f}"}, default=str))
77
78 return result
79
80
81 def _format_filters(filters):
82 """
83 Adjust filters to compliant format:
84 Put filter values into a list and remove filters with null value.
85 """
86 new_filters = {}
87 if filters is None:
88 logger.warning(f"Request with deprecated filter format ('\"filters\": null'). "
89 f"Remove empty filters from params to be compliant with future versions")
90 else:
91 for key, values in filters.items():
92 if values is None:
93 logger.warning(f"Request with deprecated filter format ('{key}: null'). "
94 f"Remove null values from filters to be compliant with future versions")
95 continue
96 elif not isinstance(values, list):
97 logger.warning(f"Request with deprecated filter format ('{key}': {values}). "
98 f"Change to '{key}':[{values}]' to be compliant with future versions")
99 values = [values]
100
101 new_filters[key] = values
102 return new_filters
103
[end of rest_api/controller/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rest_api/controller/search.py b/rest_api/controller/search.py
--- a/rest_api/controller/search.py
+++ b/rest_api/controller/search.py
@@ -2,6 +2,7 @@
import time
import json
from pathlib import Path
+from numpy import ndarray
from fastapi import APIRouter
@@ -72,6 +73,12 @@
params[key]["filters"] = _format_filters(params[key]["filters"])
result = pipeline.run(query=request.query, params=params,debug=request.debug)
+
+ # if any of the documents contains an embedding as an ndarray the latter needs to be converted to list of float
+ for document in result['documents'] or []:
+ if isinstance(document.embedding, ndarray):
+ document.embedding = document.embedding.tolist()
+
end_time = time.time()
logger.info(json.dumps({"request": request, "response": result, "time": f"{(end_time - start_time):.2f}"}, default=str))
| {"golden_diff": "diff --git a/rest_api/controller/search.py b/rest_api/controller/search.py\n--- a/rest_api/controller/search.py\n+++ b/rest_api/controller/search.py\n@@ -2,6 +2,7 @@\n import time\n import json\n from pathlib import Path\n+from numpy import ndarray\n \n from fastapi import APIRouter\n \n@@ -72,6 +73,12 @@\n params[key][\"filters\"] = _format_filters(params[key][\"filters\"])\n \n result = pipeline.run(query=request.query, params=params,debug=request.debug)\n+ \n+ # if any of the documents contains an embedding as an ndarray the latter needs to be converted to list of float\n+ for document in result['documents'] or []:\n+ if isinstance(document.embedding, ndarray):\n+ document.embedding = document.embedding.tolist()\n+ \n end_time = time.time()\n logger.info(json.dumps({\"request\": request, \"response\": result, \"time\": f\"{(end_time - start_time):.2f}\"}, default=str))\n", "issue": "REST API fails when embeddings returned flag is enabled in document store\nGood morning Haystack devs. Not sure if this is really a bug or I'm doing something wrong, so feel free to change the type if this issue to something else if it's not a bug.\r\n\r\n**Describe the bug**\r\nWhen the document store is forced to return the embeddings the REST API request fails because the type of the embedding is a Numpy array (`dtype=float32`) but the Pydantic dataclass of `DocumentSerialized` expects a `List[float]` and I think there is no automatic conversion between both types.\r\n\r\n**Error message**\r\n```\r\n[2021-11-26 08:48:59 +0000] [10] [ERROR] Exception in ASGI application\r\nhaystack-api_1 | Traceback (most recent call last):\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/uvicorn/protocols/http/httptools_impl.py\", line 375, in run_asgi\r\nhaystack-api_1 | result = await app(self.scope, self.receive, self.send)\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/uvicorn/middleware/proxy_headers.py\", line 75, in __call__\r\nhaystack-api_1 | return await self.app(scope, receive, send)\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/fastapi/applications.py\", line 208, in __call__\r\nhaystack-api_1 | await super().__call__(scope, receive, send)\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/starlette/applications.py\", line 112, in __call__\r\nhaystack-api_1 | await self.middleware_stack(scope, receive, send)\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/starlette/middleware/errors.py\", line 181, in __call__\r\nhaystack-api_1 | raise exc\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/starlette/middleware/errors.py\", line 159, in __call__\r\nhaystack-api_1 | await self.app(scope, receive, _send)\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/starlette/middleware/cors.py\", line 84, in __call__\r\nhaystack-api_1 | await self.app(scope, receive, send)\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/starlette/exceptions.py\", line 82, in __call__\r\nhaystack-api_1 | raise exc\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/starlette/exceptions.py\", line 71, in __call__\r\nhaystack-api_1 | await self.app(scope, receive, sender)\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/starlette/routing.py\", line 656, in __call__\r\nhaystack-api_1 | await route.handle(scope, receive, send)\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/starlette/routing.py\", line 259, in handle\r\nhaystack-api_1 | await 
self.app(scope, receive, send)\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/starlette/routing.py\", line 61, in app\r\nhaystack-api_1 | response = await func(request)\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/fastapi/routing.py\", line 243, in app\r\nhaystack-api_1 | is_coroutine=is_coroutine,\r\nhaystack-api_1 | File \"/usr/local/lib/python3.7/dist-packages/fastapi/routing.py\", line 137, in serialize_response\r\nhaystack-api_1 | raise ValidationError(errors, field.type_)\r\nhaystack-api_1 | pydantic.error_wrappers.ValidationError: 5 validation errors for QueryResponse\r\nhaystack-api_1 | response -> documents -> 0 -> embedding\r\nhaystack-api_1 | value is not a valid list (type=type_error.list)\r\nhaystack-api_1 | response -> documents -> 1 -> embedding\r\nhaystack-api_1 | value is not a valid list (type=type_error.list)\r\nhaystack-api_1 | response -> documents -> 2 -> embedding\r\nhaystack-api_1 | value is not a valid list (type=type_error.list)\r\nhaystack-api_1 | response -> documents -> 3 -> embedding\r\nhaystack-api_1 | value is not a valid list (type=type_error.list)\r\nhaystack-api_1 | response -> documents -> 4 -> embedding\r\nhaystack-api_1 | value is not a valid list (type=type_error.list)\r\n```\r\n\r\n\r\n**To Reproduce**\r\nJust start the REST API with `return_embedding: True` set in the document store\r\n\r\n**System:**\r\n - Haystack version (commit or version number): 1.0RC1 (latest version from Github)\r\n - DocumentStore: Milvus\r\n - Reader: Farm\r\n - Retriever: DPR\r\n\n", "before_files": [{"content": "import logging\nimport time\nimport json\nfrom pathlib import Path\n\nfrom fastapi import APIRouter\n\nimport haystack\nfrom haystack.pipelines.base import Pipeline\nfrom rest_api.config import PIPELINE_YAML_PATH, QUERY_PIPELINE_NAME\nfrom rest_api.config import LOG_LEVEL, CONCURRENT_REQUEST_PER_WORKER\nfrom rest_api.schema import QueryRequest, QueryResponse\nfrom rest_api.controller.utils import RequestLimiter\n\n\nlogging.getLogger(\"haystack\").setLevel(LOG_LEVEL)\nlogger = logging.getLogger(\"haystack\")\n\nfrom pydantic import BaseConfig\n\nBaseConfig.arbitrary_types_allowed = True\n\nrouter = APIRouter()\n\n\nPIPELINE = Pipeline.load_from_yaml(Path(PIPELINE_YAML_PATH), pipeline_name=QUERY_PIPELINE_NAME)\n# TODO make this generic for other pipelines with different naming\nRETRIEVER = PIPELINE.get_node(name=\"Retriever\")\nDOCUMENT_STORE = RETRIEVER.document_store if RETRIEVER else None\nlogging.info(f\"Loaded pipeline nodes: {PIPELINE.graph.nodes.keys()}\")\n\nconcurrency_limiter = RequestLimiter(CONCURRENT_REQUEST_PER_WORKER)\nlogging.info(\"Concurrent requests per worker: {CONCURRENT_REQUEST_PER_WORKER}\")\n\n\[email protected](\"/initialized\")\ndef check_status():\n \"\"\"\n This endpoint can be used during startup to understand if the \n server is ready to take any requests, or is still loading.\n\n The recommended approach is to call this endpoint with a short timeout,\n like 500ms, and in case of no reply, consider the server busy.\n \"\"\"\n return True\n\n\[email protected](\"/hs_version\")\ndef haystack_version():\n return {\"hs_version\": haystack.__version__}\n\n\[email protected](\"/query\", response_model=QueryResponse, response_model_exclude_none=True)\ndef query(request: QueryRequest):\n with concurrency_limiter.run():\n result = _process_request(PIPELINE, request)\n return result\n\n\ndef _process_request(pipeline, request) -> QueryResponse:\n start_time = time.time()\n \n params = request.params or 
{}\n\n # format global, top-level filters (e.g. \"params\": {\"filters\": {\"name\": [\"some\"]}})\n if \"filters\" in params.keys():\n params[\"filters\"] = _format_filters(params[\"filters\"])\n\n # format targeted node filters (e.g. \"params\": {\"Retriever\": {\"filters\": {\"value\"}}})\n for key, value in params.items():\n if \"filters\" in params[key].keys():\n params[key][\"filters\"] = _format_filters(params[key][\"filters\"])\n\n result = pipeline.run(query=request.query, params=params,debug=request.debug)\n end_time = time.time()\n logger.info(json.dumps({\"request\": request, \"response\": result, \"time\": f\"{(end_time - start_time):.2f}\"}, default=str))\n\n return result\n\n\ndef _format_filters(filters):\n \"\"\"\n Adjust filters to compliant format:\n Put filter values into a list and remove filters with null value.\n \"\"\"\n new_filters = {}\n if filters is None:\n logger.warning(f\"Request with deprecated filter format ('\\\"filters\\\": null'). \"\n f\"Remove empty filters from params to be compliant with future versions\")\n else:\n for key, values in filters.items():\n if values is None:\n logger.warning(f\"Request with deprecated filter format ('{key}: null'). \"\n f\"Remove null values from filters to be compliant with future versions\")\n continue\n elif not isinstance(values, list):\n logger.warning(f\"Request with deprecated filter format ('{key}': {values}). \"\n f\"Change to '{key}':[{values}]' to be compliant with future versions\")\n values = [values]\n\n new_filters[key] = values\n return new_filters\n", "path": "rest_api/controller/search.py"}]} | 2,723 | 211 |
gh_patches_debug_25544 | rasdani/github-patches | git_diff | AnalogJ__lexicon-486 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cloudflare provider only lists first DNS 100 entries for domain
How to reproduce:
* Have a CloudFlare domain with > 100 entries
* Create a new entry starting with a "high" letter, e.g. `zzzz.<domain>`
* List entries: only the first 100 entries will be returned, making it appear as though `zzzz` had not been created.
```
def _list_records(self, rtype=None, name=None, content=None):
filter_obj = {'per_page': 100} ####### NOTE
if rtype:
filter_obj['type'] = rtype
if name:
filter_obj['name'] = self._full_name(name)
if content:
filter_obj['content'] = content
payload = self._get(
'/zones/{0}/dns_records'.format(self.domain_id), filter_obj)
### subsequent pages not retrieved
records = []
for record in payload['result']:
processed_record = {
'type': record['type'],
'name': record['name'],
'ttl': record['ttl'],
'content': record['content'],
'id': record['id']
}
records.append(processed_record)
LOGGER.debug('list_records: %s', records)
return records
```
This causes weird errors whereby an existing entry is not returned if it falls after the 100th entry.
Without specifying the "per page" parameter, only the first 20 entries are returned.
Besides a quick-n-dirty solution of raising that number to e.g. 1000, the proper solution would be to implement proper pagination support for the _get method.
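
A rough sketch of the kind of loop meant here (not the project's actual fix): it assumes the Cloudflare v4 payload carries a `result_info` block with `page` and `total_pages`, and it takes a `fetch_page` callable so it stays independent of the provider's `_get` helper shown above.

```
def list_all_records(fetch_page):
    """Collect `result` entries across all pages of a Cloudflare v4 listing."""
    records, page = [], 1
    while True:
        payload = fetch_page(page)            # e.g. wraps self._get(..., {'page': page, ...})
        records.extend(payload["result"])
        info = payload["result_info"]         # pagination metadata from the API
        if info["page"] >= info["total_pages"]:
            return records
        page += 1
```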
</issue>
<code>
[start of lexicon/providers/cloudflare.py]
1 """Module provider for Cloudflare"""
2 from __future__ import absolute_import
3 import json
4 import logging
5
6 import requests
7 from lexicon.providers.base import Provider as BaseProvider
8
9
10 LOGGER = logging.getLogger(__name__)
11
12 NAMESERVER_DOMAINS = ['cloudflare.com']
13
14
15 def provider_parser(subparser):
16 """Return the parser for this provider"""
17 subparser.add_argument(
18 "--auth-username", help="specify email address for authentication")
19 subparser.add_argument(
20 "--auth-token", help="specify token for authentication")
21
22
23 class Provider(BaseProvider):
24 """Provider class for Cloudflare"""
25 def __init__(self, config):
26 super(Provider, self).__init__(config)
27 self.domain_id = None
28 self.api_endpoint = 'https://api.cloudflare.com/client/v4'
29
30 def _authenticate(self):
31
32 payload = self._get('/zones', {
33 'name': self.domain,
34 'status': 'active'
35 })
36
37 if not payload['result']:
38 raise Exception('No domain found')
39 if len(payload['result']) > 1:
40 raise Exception('Too many domains found. This should not happen')
41
42 self.domain_id = payload['result'][0]['id']
43
44 # Create record. If record already exists with the same content, do nothing'
45
46 def _create_record(self, rtype, name, content):
47 data = {'type': rtype, 'name': self._full_name(
48 name), 'content': content}
49 if self._get_lexicon_option('ttl'):
50 data['ttl'] = self._get_lexicon_option('ttl')
51
52 payload = {'success': True}
53 try:
54 payload = self._post(
55 '/zones/{0}/dns_records'.format(self.domain_id), data)
56 except requests.exceptions.HTTPError as err:
57 already_exists = next((True for error in err.response.json()[
58 'errors'] if error['code'] == 81057), False)
59 if not already_exists:
60 raise
61
62 LOGGER.debug('create_record: %s', payload['success'])
63 return payload['success']
64
65 # List all records. Return an empty list if no records found
66 # type, name and content are used to filter records.
67 # If possible filter during the query, otherwise filter after response is received.
68 def _list_records(self, rtype=None, name=None, content=None):
69 filter_obj = {'per_page': 100}
70 if rtype:
71 filter_obj['type'] = rtype
72 if name:
73 filter_obj['name'] = self._full_name(name)
74 if content:
75 filter_obj['content'] = content
76
77 payload = self._get(
78 '/zones/{0}/dns_records'.format(self.domain_id), filter_obj)
79
80 records = []
81 for record in payload['result']:
82 processed_record = {
83 'type': record['type'],
84 'name': record['name'],
85 'ttl': record['ttl'],
86 'content': record['content'],
87 'id': record['id']
88 }
89 records.append(processed_record)
90
91 LOGGER.debug('list_records: %s', records)
92 return records
93
94 # Create or update a record.
95 def _update_record(self, identifier, rtype=None, name=None, content=None):
96
97 data = {}
98 if rtype:
99 data['type'] = rtype
100 if name:
101 data['name'] = self._full_name(name)
102 if content:
103 data['content'] = content
104 if self._get_lexicon_option('ttl'):
105 data['ttl'] = self._get_lexicon_option('ttl')
106
107 payload = self._put(
108 '/zones/{0}/dns_records/{1}'.format(self.domain_id, identifier), data)
109
110 LOGGER.debug('update_record: %s', payload['success'])
111 return payload['success']
112
113 # Delete an existing record.
114 # If record does not exist, do nothing.
115 def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
116 delete_record_id = []
117 if not identifier:
118 records = self._list_records(rtype, name, content)
119 delete_record_id = [record['id'] for record in records]
120 else:
121 delete_record_id.append(identifier)
122
123 LOGGER.debug('delete_records: %s', delete_record_id)
124
125 for record_id in delete_record_id:
126 self._delete(
127 '/zones/{0}/dns_records/{1}'.format(self.domain_id, record_id))
128
129 LOGGER.debug('delete_record: %s', True)
130 return True
131
132 # Helpers
133 def _request(self, action='GET', url='/', data=None, query_params=None):
134 if data is None:
135 data = {}
136 if query_params is None:
137 query_params = {}
138 response = requests.request(action, self.api_endpoint + url, params=query_params,
139 data=json.dumps(data),
140 headers={
141 'X-Auth-Email': self._get_provider_option('auth_username'),
142 'X-Auth-Key': self._get_provider_option('auth_token'),
143 'Content-Type': 'application/json'
144 })
145 # if the request fails for any reason, throw an error.
146 response.raise_for_status()
147 return response.json()
148
[end of lexicon/providers/cloudflare.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lexicon/providers/cloudflare.py b/lexicon/providers/cloudflare.py
--- a/lexicon/providers/cloudflare.py
+++ b/lexicon/providers/cloudflare.py
@@ -74,21 +74,31 @@
if content:
filter_obj['content'] = content
- payload = self._get(
- '/zones/{0}/dns_records'.format(self.domain_id), filter_obj)
-
records = []
- for record in payload['result']:
- processed_record = {
- 'type': record['type'],
- 'name': record['name'],
- 'ttl': record['ttl'],
- 'content': record['content'],
- 'id': record['id']
- }
- records.append(processed_record)
+ while True:
+ payload = self._get(
+ '/zones/{0}/dns_records'.format(self.domain_id), filter_obj)
+
+ LOGGER.debug("payload: %s", payload)
+
+ for record in payload['result']:
+ processed_record = {
+ 'type': record['type'],
+ 'name': record['name'],
+ 'ttl': record['ttl'],
+ 'content': record['content'],
+ 'id': record['id']
+ }
+ records.append(processed_record)
+
+ pages = payload['result_info']['total_pages']
+ page = payload['result_info']['page']
+ if page >= pages:
+ break
+ filter_obj['page'] = page + 1
LOGGER.debug('list_records: %s', records)
+ LOGGER.debug('Number of records retrieved: %d', len(records))
return records
# Create or update a record.
| {"golden_diff": "diff --git a/lexicon/providers/cloudflare.py b/lexicon/providers/cloudflare.py\n--- a/lexicon/providers/cloudflare.py\n+++ b/lexicon/providers/cloudflare.py\n@@ -74,21 +74,31 @@\n if content:\n filter_obj['content'] = content\n \n- payload = self._get(\n- '/zones/{0}/dns_records'.format(self.domain_id), filter_obj)\n-\n records = []\n- for record in payload['result']:\n- processed_record = {\n- 'type': record['type'],\n- 'name': record['name'],\n- 'ttl': record['ttl'],\n- 'content': record['content'],\n- 'id': record['id']\n- }\n- records.append(processed_record)\n+ while True:\n+ payload = self._get(\n+ '/zones/{0}/dns_records'.format(self.domain_id), filter_obj)\n+\n+ LOGGER.debug(\"payload: %s\", payload)\n+\n+ for record in payload['result']:\n+ processed_record = {\n+ 'type': record['type'],\n+ 'name': record['name'],\n+ 'ttl': record['ttl'],\n+ 'content': record['content'],\n+ 'id': record['id']\n+ }\n+ records.append(processed_record)\n+\n+ pages = payload['result_info']['total_pages']\n+ page = payload['result_info']['page']\n+ if page >= pages:\n+ break\n+ filter_obj['page'] = page + 1\n \n LOGGER.debug('list_records: %s', records)\n+ LOGGER.debug('Number of records retrieved: %d', len(records))\n return records\n \n # Create or update a record.\n", "issue": "Cloudflare provider only lists first DNS 100 entries for domain\nHow to reproduce:\r\n\r\n* Have a CloudFlare domain with > 100 entries\r\n* Create a new entry starting with a \"high\" letter, e.g. `zzzz.<domain>`\r\n* List entries: only the first 100 entries will be returned, making it appear as though `zzzz` had not been created.\r\n\r\n\r\n```\r\n def _list_records(self, rtype=None, name=None, content=None):\r\n filter_obj = {'per_page': 100} ####### NOTE\r\n if rtype:\r\n filter_obj['type'] = rtype\r\n if name:\r\n filter_obj['name'] = self._full_name(name)\r\n if content:\r\n filter_obj['content'] = content\r\n\r\n payload = self._get(\r\n '/zones/{0}/dns_records'.format(self.domain_id), filter_obj) \r\n ### subsequent pages not retrieved\r\n\r\n records = []\r\n for record in payload['result']:\r\n processed_record = {\r\n 'type': record['type'],\r\n 'name': record['name'],\r\n 'ttl': record['ttl'],\r\n 'content': record['content'],\r\n 'id': record['id']\r\n }\r\n records.append(processed_record)\r\n\r\n LOGGER.debug('list_records: %s', records)\r\n return records\r\n```\r\n\r\nThis causes weird errors whereby an existing entry is not returned if falls after the 100th entry.\r\n\r\nWithout specifying the \"per page\" parameter, only the first 20 entries are returned.\r\n\r\nBesides a quick-n-dirty solution of raising that number to e.g. 
1000, the proper solution would be to implement proper pagination support for the _get method.\n", "before_files": [{"content": "\"\"\"Module provider for Cloudflare\"\"\"\nfrom __future__ import absolute_import\nimport json\nimport logging\n\nimport requests\nfrom lexicon.providers.base import Provider as BaseProvider\n\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS = ['cloudflare.com']\n\n\ndef provider_parser(subparser):\n \"\"\"Return the parser for this provider\"\"\"\n subparser.add_argument(\n \"--auth-username\", help=\"specify email address for authentication\")\n subparser.add_argument(\n \"--auth-token\", help=\"specify token for authentication\")\n\n\nclass Provider(BaseProvider):\n \"\"\"Provider class for Cloudflare\"\"\"\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.domain_id = None\n self.api_endpoint = 'https://api.cloudflare.com/client/v4'\n\n def _authenticate(self):\n\n payload = self._get('/zones', {\n 'name': self.domain,\n 'status': 'active'\n })\n\n if not payload['result']:\n raise Exception('No domain found')\n if len(payload['result']) > 1:\n raise Exception('Too many domains found. This should not happen')\n\n self.domain_id = payload['result'][0]['id']\n\n # Create record. If record already exists with the same content, do nothing'\n\n def _create_record(self, rtype, name, content):\n data = {'type': rtype, 'name': self._full_name(\n name), 'content': content}\n if self._get_lexicon_option('ttl'):\n data['ttl'] = self._get_lexicon_option('ttl')\n\n payload = {'success': True}\n try:\n payload = self._post(\n '/zones/{0}/dns_records'.format(self.domain_id), data)\n except requests.exceptions.HTTPError as err:\n already_exists = next((True for error in err.response.json()[\n 'errors'] if error['code'] == 81057), False)\n if not already_exists:\n raise\n\n LOGGER.debug('create_record: %s', payload['success'])\n return payload['success']\n\n # List all records. 
Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def _list_records(self, rtype=None, name=None, content=None):\n filter_obj = {'per_page': 100}\n if rtype:\n filter_obj['type'] = rtype\n if name:\n filter_obj['name'] = self._full_name(name)\n if content:\n filter_obj['content'] = content\n\n payload = self._get(\n '/zones/{0}/dns_records'.format(self.domain_id), filter_obj)\n\n records = []\n for record in payload['result']:\n processed_record = {\n 'type': record['type'],\n 'name': record['name'],\n 'ttl': record['ttl'],\n 'content': record['content'],\n 'id': record['id']\n }\n records.append(processed_record)\n\n LOGGER.debug('list_records: %s', records)\n return records\n\n # Create or update a record.\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n\n data = {}\n if rtype:\n data['type'] = rtype\n if name:\n data['name'] = self._full_name(name)\n if content:\n data['content'] = content\n if self._get_lexicon_option('ttl'):\n data['ttl'] = self._get_lexicon_option('ttl')\n\n payload = self._put(\n '/zones/{0}/dns_records/{1}'.format(self.domain_id, identifier), data)\n\n LOGGER.debug('update_record: %s', payload['success'])\n return payload['success']\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n delete_record_id = []\n if not identifier:\n records = self._list_records(rtype, name, content)\n delete_record_id = [record['id'] for record in records]\n else:\n delete_record_id.append(identifier)\n\n LOGGER.debug('delete_records: %s', delete_record_id)\n\n for record_id in delete_record_id:\n self._delete(\n '/zones/{0}/dns_records/{1}'.format(self.domain_id, record_id))\n\n LOGGER.debug('delete_record: %s', True)\n return True\n\n # Helpers\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n response = requests.request(action, self.api_endpoint + url, params=query_params,\n data=json.dumps(data),\n headers={\n 'X-Auth-Email': self._get_provider_option('auth_username'),\n 'X-Auth-Key': self._get_provider_option('auth_token'),\n 'Content-Type': 'application/json'\n })\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n", "path": "lexicon/providers/cloudflare.py"}]} | 2,365 | 373 |
gh_patches_debug_20211 | rasdani/github-patches | git_diff | ultrabug__py3status-2075 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
do_not_disturb: mako doesn't work anymore (changed commands for dnd)
I use mako as my notification daemon and want to use the do_not_disturb module, which does not work.
py3status calls `makoctl set invisible={}`; however, the current version of mako uses
`makoctl set-mode do-not-disturb` to set DND mode and
`makoctl set-mode default` to unset DND mode.
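
A minimal sketch of driving the newer interface, using only the two commands quoted above; the helper name is made up and `subprocess` stands in for the module's own command runner:

```
import subprocess


def set_mako_dnd(enabled: bool) -> None:
    """Toggle mako's do-not-disturb mode via `makoctl set-mode`."""
    mode = "do-not-disturb" if enabled else "default"
    subprocess.run(["makoctl", "set-mode", mode], check=True)
```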
**Your py3status version**
py3status version 3.39 (python 3.10.0) on i3 (it's sway, actually.)
**To Reproduce**
1. Add module do_not_disturb
2. Set `server` to `mako`
3. Switch to DND mode
4. Send a notification
**Expected behavior**
Notification shouldn't be shown
</issue>
<code>
[start of py3status/modules/do_not_disturb.py]
1 r"""
2 Turn on and off desktop notifications.
3
4 Configuration parameters:
5 cache_timeout: refresh interval for this module; for xfce4-notifyd
6 (default 30)
7 format: display format for this module
8 (default '{name} [\?color=state&show DND]')
9 pause: specify whether to pause or kill processes; for dunst
10 see `Dunst Miscellaneous` section for more information
11 (default True)
12 server: specify server to use, eg mako, dunst or xfce4-notifyd, otherwise auto
13 (default None)
14 state: specify state to use on startup, otherwise last
15 False: disable Do Not Disturb on startup
16 True: enable Do Not Disturb on startup
17 last: toggle last known state on startup
18 None: query current state from notification manager (doesn't work on dunst<1.5.0)
19 (default 'last')
20 thresholds: specify color thresholds to use
21 (default [(0, 'bad'), (1, 'good')])
22
23 Format placeholders:
24 {name} name, eg Mako, Dunst, Xfce4-notifyd
25 {state} do not disturb state, eg 0, 1
26
27 Color thresholds:
28 xxx: print a color based on the value of `xxx` placeholder
29
30 Dunst Miscellaneous:
31 When paused, dunst will not display any notifications but keep all
32 notifications in a queue. This can for example be wrapped around a screen
33 locker (i3lock, slock) to prevent flickering of notifications through the
34 lock and to read all missed notifications after returning to the computer.
35 This means that by default (pause = False), all notifications sent while
36 DND is active will NOT be queued and displayed when DND is deactivated.
37
38
39 Examples:
40 ```
41 # display ON/OFF
42 do_not_disturb {
43 format = '{name} [\?color=state [\?if=state ON|OFF]]'
44 }
45
46 # display 1/0
47 do_not_disturb {
48 format = '{name} [\?color=state {state}]'
49 }
50
51 # display DO NOT DISTURB/DISTURB
52 do_not_disturb {
53 format = '[\?color=state [\?if=state DO NOT DISTURB|DISTURB]]'
54 thresholds = [(0, "darkgray"), (1, "good")]
55 }
56 ```
57
58 @author Maxim Baz https://github.com/maximbaz (dunst)
59 @author Robert Ricci https://github.com/ricci (xfce4-notifyd)
60 @author Cyrinux https://github.com/cyrinux (mako)
61 @license BSD
62
63 SAMPLE OUTPUT
64 [{'full_text': 'Dunst '}, {'color': '#00FF00', 'full_text': 'DND'}]
65
66 off
67 [{'full_text': 'Dunst '}, {'color': '#FF0000', 'full_text': 'DND'}]
68 """
69
70 STRING_NOT_INSTALLED = "server `{}` not installed"
71 STRING_INVALID_SERVER = "invalid server `{}`"
72 STRING_INVALID_STATE = "invalid state `{}`"
73
74
75 class Notification:
76 def __init__(self, parent):
77 self.parent = parent
78 self.setup(parent)
79
80 def setup(self, parent):
81 pass
82
83 def get_state(self):
84 return self.parent.state
85
86
87 class Dunst(Notification):
88 """
89 Dunst Notification.
90 """
91
92 def setup(self, parent):
93 self.has_dunstctl = bool(self.parent.py3.check_commands(["dunstctl"]))
94
95 def get_state(self):
96 if self.has_dunstctl:
97 state = self.parent.py3.command_output("dunstctl is-paused")
98 return state.strip() == "true"
99 else:
100 return self.parent.state
101
102 def toggle(self, state):
103 if self.has_dunstctl:
104 self.parent.py3.command_run(
105 "dunstctl set-paused {}".format(str(state).lower())
106 )
107 elif state:
108 # pause
109 self.parent.py3.command_run("pkill -SIGUSR1 dunst")
110 else:
111 if self.parent.pause:
112 # resume
113 self.parent.py3.command_run("pkill -SIGUSR2 dunst")
114 else:
115 # delete all pending notifications and resume
116 self.parent.py3.command_run("pkill -SIGTERM dunst")
117
118
119 class Mako(Notification):
120 """
121 Mako Notification.
122 """
123
124 def toggle(self, state):
125 self.parent.py3.command_run("makoctl set invisible={}".format(int(state)))
126
127
128 class Xfce4_notifyd(Notification):
129 """
130 XFCE4 Notification.
131 """
132
133 def setup(self, parent):
134 from dbus import Interface, SessionBus
135
136 self.iface = Interface(
137 SessionBus().get_object("org.xfce.Xfconf", "/org/xfce/Xfconf"),
138 "org.xfce.Xfconf",
139 )
140
141 def get_state(self):
142 return self.iface.GetProperty("xfce4-notifyd", "/do-not-disturb")
143
144 def toggle(self, state):
145 self.iface.SetProperty("xfce4-notifyd", "/do-not-disturb", state)
146
147
148 class Py3status:
149 """
150 """
151
152 # available configuration parameters
153 cache_timeout = 30
154 format = r"{name} [\?color=state&show DND]"
155 pause = True
156 server = None
157 state = "last"
158 thresholds = [(0, "bad"), (1, "good")]
159
160 def post_config_hook(self):
161 servers = ["dunst", "mako", "xfce4-notifyd", None]
162 if not self.server:
163 for server in servers:
164 if server:
165 try:
166 if self.py3.command_output(["pgrep", "-x", server]):
167 self.server = server
168 break
169 except self.py3.CommandError:
170 pass
171 else:
172 self.server = self.py3.check_commands(servers[:-1]) or "dunst"
173 elif self.server not in servers:
174 raise Exception(STRING_INVALID_SERVER.format(self.server))
175 else:
176 command = self.server.replace("notifyd", "notifyd-config")
177 if not self.py3.check_commands(command):
178 raise Exception(STRING_NOT_INSTALLED.format(command))
179
180 if self.server == "dunst":
181 self.backend = Dunst(self)
182 elif self.server == "mako":
183 self.backend = Mako(self)
184 elif self.server == "xfce4-notifyd":
185 self.backend = Xfce4_notifyd(self)
186
187 if self.state is not None:
188 if self.state == "last":
189 self.state = self.py3.storage_get("state") or 0
190 if self.state in [False, True]:
191 self.backend.toggle(self.state)
192 else:
193 raise Exception(STRING_INVALID_STATE.format(self.state))
194 elif self.server == "dunst" and not self.backend.has_dunstctl:
195 raise Exception(STRING_INVALID_STATE.format(self.state))
196
197 self.name = self.server.capitalize()
198 self.thresholds_init = self.py3.get_color_names_list(self.format)
199
200 def do_not_disturb(self):
201 self.state = self.backend.get_state()
202 dnd_data = {"state": int(self.state), "name": self.name}
203
204 for x in self.thresholds_init:
205 if x in dnd_data:
206 self.py3.threshold_get_color(dnd_data[x], x)
207
208 return {
209 "cached_until": self.py3.time_in(self.cache_timeout),
210 "full_text": self.py3.safe_format(self.format, dnd_data),
211 }
212
213 def on_click(self, event):
214 self.state = not self.state
215 self.py3.storage_set("state", self.state)
216 self.backend.toggle(self.state)
217
218
219 if __name__ == "__main__":
220 """
221 Run module in test mode.
222 """
223 from py3status.module_test import module_test
224
225 module_test(Py3status)
226
[end of py3status/modules/do_not_disturb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py3status/modules/do_not_disturb.py b/py3status/modules/do_not_disturb.py
--- a/py3status/modules/do_not_disturb.py
+++ b/py3status/modules/do_not_disturb.py
@@ -35,6 +35,14 @@
This means that by default (pause = False), all notifications sent while
DND is active will NOT be queued and displayed when DND is deactivated.
+Mako Miscellaneous:
+ Mako requires that you manually create a 'do-not-disturb' mode as shown
+ in https://man.voidlinux.org/mako.5#MODES.
+ This module expects this mode to be configured by the user as suggested by
+ the mako documentation:
+ [mode=do-not-disturb]
+ invisible=1
+
Examples:
```
@@ -121,8 +129,14 @@
Mako Notification.
"""
+ def setup(self, parent):
+ self.toggle(parent.state)
+
def toggle(self, state):
- self.parent.py3.command_run("makoctl set invisible={}".format(int(state)))
+ if state is True:
+ self.parent.py3.command_run("makoctl set-mode do-not-disturb")
+ else:
+ self.parent.py3.command_run("makoctl set-mode default")
class Xfce4_notifyd(Notification):
| {"golden_diff": "diff --git a/py3status/modules/do_not_disturb.py b/py3status/modules/do_not_disturb.py\n--- a/py3status/modules/do_not_disturb.py\n+++ b/py3status/modules/do_not_disturb.py\n@@ -35,6 +35,14 @@\n This means that by default (pause = False), all notifications sent while\n DND is active will NOT be queued and displayed when DND is deactivated.\n \n+Mako Miscellaneous:\n+ Mako requires that you manually create a 'do-not-disturb' mode as shown\n+ in https://man.voidlinux.org/mako.5#MODES.\n+ This module expects this mode to be configured by the user as suggested by\n+ the mako documentation:\n+ [mode=do-not-disturb]\n+ invisible=1\n+\n \n Examples:\n ```\n@@ -121,8 +129,14 @@\n Mako Notification.\n \"\"\"\n \n+ def setup(self, parent):\n+ self.toggle(parent.state)\n+\n def toggle(self, state):\n- self.parent.py3.command_run(\"makoctl set invisible={}\".format(int(state)))\n+ if state is True:\n+ self.parent.py3.command_run(\"makoctl set-mode do-not-disturb\")\n+ else:\n+ self.parent.py3.command_run(\"makoctl set-mode default\")\n \n \n class Xfce4_notifyd(Notification):\n", "issue": "do_not_disturb: mako doesn't work anymore (changed commands for dnd)\nI use mako as my notification daemon and want to use the do_not_disturb module, which does not work.\r\n\r\npy3status calls `makoctl set invisble={}`, however current version of mako uses\r\n`makoctl set-mode do-not-disturb` to set DND mode and\r\n`makoctl set-mode default` to unset DND mode.\r\n\r\n**Your py3status version**\r\npy3status version 3.39 (python 3.10.0) on i3 (it's sway, actually.)\r\n\r\n**To Reproduce**\r\n1. Add module do_not_disturb\r\n2. Set `server` to `mako`\r\n3. Switch to DND mode\r\n4. Send a notification\r\n\r\n**Expected behavior**\r\nNotification shouldn't be shown\n", "before_files": [{"content": "r\"\"\"\nTurn on and off desktop notifications.\n\nConfiguration parameters:\n cache_timeout: refresh interval for this module; for xfce4-notifyd\n (default 30)\n format: display format for this module\n (default '{name} [\\?color=state&show DND]')\n pause: specify whether to pause or kill processes; for dunst\n see `Dunst Miscellaneous` section for more information\n (default True)\n server: specify server to use, eg mako, dunst or xfce4-notifyd, otherwise auto\n (default None)\n state: specify state to use on startup, otherwise last\n False: disable Do Not Disturb on startup\n True: enable Do Not Disturb on startup\n last: toggle last known state on startup\n None: query current state from notification manager (doesn't work on dunst<1.5.0)\n (default 'last')\n thresholds: specify color thresholds to use\n (default [(0, 'bad'), (1, 'good')])\n\nFormat placeholders:\n {name} name, eg Mako, Dunst, Xfce4-notifyd\n {state} do not disturb state, eg 0, 1\n\nColor thresholds:\n xxx: print a color based on the value of `xxx` placeholder\n\nDunst Miscellaneous:\n When paused, dunst will not display any notifications but keep all\n notifications in a queue. 
This can for example be wrapped around a screen\n locker (i3lock, slock) to prevent flickering of notifications through the\n lock and to read all missed notifications after returning to the computer.\n This means that by default (pause = False), all notifications sent while\n DND is active will NOT be queued and displayed when DND is deactivated.\n\n\nExamples:\n```\n# display ON/OFF\ndo_not_disturb {\n format = '{name} [\\?color=state [\\?if=state ON|OFF]]'\n}\n\n# display 1/0\ndo_not_disturb {\n format = '{name} [\\?color=state {state}]'\n}\n\n# display DO NOT DISTURB/DISTURB\ndo_not_disturb {\n format = '[\\?color=state [\\?if=state DO NOT DISTURB|DISTURB]]'\n thresholds = [(0, \"darkgray\"), (1, \"good\")]\n}\n```\n\n@author Maxim Baz https://github.com/maximbaz (dunst)\n@author Robert Ricci https://github.com/ricci (xfce4-notifyd)\n@author Cyrinux https://github.com/cyrinux (mako)\n@license BSD\n\nSAMPLE OUTPUT\n[{'full_text': 'Dunst '}, {'color': '#00FF00', 'full_text': 'DND'}]\n\noff\n[{'full_text': 'Dunst '}, {'color': '#FF0000', 'full_text': 'DND'}]\n\"\"\"\n\nSTRING_NOT_INSTALLED = \"server `{}` not installed\"\nSTRING_INVALID_SERVER = \"invalid server `{}`\"\nSTRING_INVALID_STATE = \"invalid state `{}`\"\n\n\nclass Notification:\n def __init__(self, parent):\n self.parent = parent\n self.setup(parent)\n\n def setup(self, parent):\n pass\n\n def get_state(self):\n return self.parent.state\n\n\nclass Dunst(Notification):\n \"\"\"\n Dunst Notification.\n \"\"\"\n\n def setup(self, parent):\n self.has_dunstctl = bool(self.parent.py3.check_commands([\"dunstctl\"]))\n\n def get_state(self):\n if self.has_dunstctl:\n state = self.parent.py3.command_output(\"dunstctl is-paused\")\n return state.strip() == \"true\"\n else:\n return self.parent.state\n\n def toggle(self, state):\n if self.has_dunstctl:\n self.parent.py3.command_run(\n \"dunstctl set-paused {}\".format(str(state).lower())\n )\n elif state:\n # pause\n self.parent.py3.command_run(\"pkill -SIGUSR1 dunst\")\n else:\n if self.parent.pause:\n # resume\n self.parent.py3.command_run(\"pkill -SIGUSR2 dunst\")\n else:\n # delete all pending notifications and resume\n self.parent.py3.command_run(\"pkill -SIGTERM dunst\")\n\n\nclass Mako(Notification):\n \"\"\"\n Mako Notification.\n \"\"\"\n\n def toggle(self, state):\n self.parent.py3.command_run(\"makoctl set invisible={}\".format(int(state)))\n\n\nclass Xfce4_notifyd(Notification):\n \"\"\"\n XFCE4 Notification.\n \"\"\"\n\n def setup(self, parent):\n from dbus import Interface, SessionBus\n\n self.iface = Interface(\n SessionBus().get_object(\"org.xfce.Xfconf\", \"/org/xfce/Xfconf\"),\n \"org.xfce.Xfconf\",\n )\n\n def get_state(self):\n return self.iface.GetProperty(\"xfce4-notifyd\", \"/do-not-disturb\")\n\n def toggle(self, state):\n self.iface.SetProperty(\"xfce4-notifyd\", \"/do-not-disturb\", state)\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n\n # available configuration parameters\n cache_timeout = 30\n format = r\"{name} [\\?color=state&show DND]\"\n pause = True\n server = None\n state = \"last\"\n thresholds = [(0, \"bad\"), (1, \"good\")]\n\n def post_config_hook(self):\n servers = [\"dunst\", \"mako\", \"xfce4-notifyd\", None]\n if not self.server:\n for server in servers:\n if server:\n try:\n if self.py3.command_output([\"pgrep\", \"-x\", server]):\n self.server = server\n break\n except self.py3.CommandError:\n pass\n else:\n self.server = self.py3.check_commands(servers[:-1]) or \"dunst\"\n elif self.server not in servers:\n raise 
Exception(STRING_INVALID_SERVER.format(self.server))\n else:\n command = self.server.replace(\"notifyd\", \"notifyd-config\")\n if not self.py3.check_commands(command):\n raise Exception(STRING_NOT_INSTALLED.format(command))\n\n if self.server == \"dunst\":\n self.backend = Dunst(self)\n elif self.server == \"mako\":\n self.backend = Mako(self)\n elif self.server == \"xfce4-notifyd\":\n self.backend = Xfce4_notifyd(self)\n\n if self.state is not None:\n if self.state == \"last\":\n self.state = self.py3.storage_get(\"state\") or 0\n if self.state in [False, True]:\n self.backend.toggle(self.state)\n else:\n raise Exception(STRING_INVALID_STATE.format(self.state))\n elif self.server == \"dunst\" and not self.backend.has_dunstctl:\n raise Exception(STRING_INVALID_STATE.format(self.state))\n\n self.name = self.server.capitalize()\n self.thresholds_init = self.py3.get_color_names_list(self.format)\n\n def do_not_disturb(self):\n self.state = self.backend.get_state()\n dnd_data = {\"state\": int(self.state), \"name\": self.name}\n\n for x in self.thresholds_init:\n if x in dnd_data:\n self.py3.threshold_get_color(dnd_data[x], x)\n\n return {\n \"cached_until\": self.py3.time_in(self.cache_timeout),\n \"full_text\": self.py3.safe_format(self.format, dnd_data),\n }\n\n def on_click(self, event):\n self.state = not self.state\n self.py3.storage_set(\"state\", self.state)\n self.backend.toggle(self.state)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n\n module_test(Py3status)\n", "path": "py3status/modules/do_not_disturb.py"}]} | 3,018 | 304 |
gh_patches_debug_5027 | rasdani/github-patches | git_diff | holoviz__panel-5818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
find_requirements includes datetime and random
I'm on the current `main` branch of Panel.
When I use `find_requirements` as shown below, the returned "packages" include standard-library modules such as `datetime` and `random`.
```python
from panel.io.convert import find_requirements
code = """
import panel as pn
import pandas as pd
import random
from datetime import datetime, timedelta
"""
requirements = find_requirements(code)
print(requirements)
```
```bash
$ python app.py
['datetime', 'pandas', 'panel', 'random']
```
</issue>
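For context, Python 3.10+ exposes an authoritative list of standard-library module names that also covers pure-Python modules like `datetime` and `random`. A minimal sketch of a stricter check (the helper name `stdlib_modules` is made up for illustration):
```python
import sys


def stdlib_modules():
    """Return the set of standard-library module names for this interpreter."""
    if sys.version_info[:2] >= (3, 10):
        # frozenset that includes pure-Python stdlib modules as well
        return set(sys.stdlib_module_names)
    # older interpreters: fall back to the compiled-in modules only
    return set(sys.builtin_module_names)


print("datetime" in stdlib_modules())  # True on Python 3.10+
print("pandas" in stdlib_modules())    # False
```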
<code>
[start of panel/io/mime_render.py]
1 """
2 Utilities for executing Python code and rendering the resulting output
3 using a similar MIME-type based rendering system as implemented by
4 IPython.
5
6 Attempts to limit the actual MIME types that have to be rendered on
7 to a minimum simplifying frontend implementation:
8
9 - application/bokeh: Model JSON representation
10 - text/plain: HTML escaped string output
11 - text/html: HTML code to insert into the DOM
12 """
13
14 from __future__ import annotations
15
16 import ast
17 import base64
18 import copy
19 import io
20 import pathlib
21 import pkgutil
22 import sys
23 import traceback
24
25 from contextlib import redirect_stderr, redirect_stdout
26 from html import escape
27 from textwrap import dedent
28 from typing import Any, Dict, List
29
30 import markdown
31
32 #---------------------------------------------------------------------
33 # Import API
34 #---------------------------------------------------------------------
35
36 def _stdlibs():
37 env_dir = str(pathlib.Path(sys.executable).parent.parent)
38 modules = list(sys.builtin_module_names)
39 for m in pkgutil.iter_modules():
40 mpath = getattr(m.module_finder, 'path', '')
41 if mpath.startswith(env_dir) and 'site-packages' not in mpath:
42 modules.append(m.name)
43 return modules
44
45 _STDLIBS = _stdlibs()
46 _PACKAGE_MAP = {
47 'sklearn': 'scikit-learn',
48 'transformers_js': 'transformers-js-py',
49 }
50 _IGNORED_PKGS = ['js', 'pyodide']
51 _PANDAS_AUTODETECT = ['bokeh.sampledata', 'as_frame']
52
53 def find_requirements(code: str) -> List[str]:
54 """
55 Finds the packages required in a string of code.
56
57 Parameters
58 ----------
59 code : str
60 the Python code to run.
61
62 Returns
63 -------
64 ``List[str]``
65 A list of package names that are to be installed for the code to be able to run.
66
67 Examples
68 --------
69 >>> code = "import numpy as np; import scipy.stats"
70 >>> find_imports(code)
71 ['numpy', 'scipy']
72 """
73 # handle mis-indented input from multi-line strings
74 code = dedent(code)
75
76 mod = ast.parse(code)
77 imports = set()
78 for node in ast.walk(mod):
79 if isinstance(node, ast.Import):
80 for name in node.names:
81 node_name = name.name
82 imports.add(node_name.split(".")[0])
83 elif isinstance(node, ast.ImportFrom):
84 module_name = node.module
85 if module_name is None:
86 continue
87 imports.add(module_name.split(".")[0])
88
89 packages = []
90 for pkg in sorted(imports):
91 pkg = _PACKAGE_MAP.get(pkg, pkg)
92 if pkg in _STDLIBS:
93 continue
94 elif isinstance(pkg, list):
95 packages.extend(pkg)
96 else:
97 packages.append(pkg)
98 if any(pdd in code for pdd in _PANDAS_AUTODETECT) and 'pandas' not in packages:
99 packages.append('pandas')
100 return [pkg for pkg in packages if pkg not in _IGNORED_PKGS]
101
102 #---------------------------------------------------------------------
103 # Execution API
104 #---------------------------------------------------------------------
105
106 class WriteCallbackStream(io.StringIO):
107
108 def __init__(self, on_write=None, escape=True):
109 self._onwrite = on_write
110 self._escape = escape
111 super().__init__()
112
113 def write(self, s):
114 if self._onwrite:
115 self._onwrite(escape(s) if self._escape else s)
116 super().write(s)
117
118 def _convert_expr(expr: ast.Expr) -> ast.Expression:
119 """
120 Converts an ast.Expr to and ast.Expression that can be compiled
121 and evaled.
122 """
123 expr.lineno = 0
124 expr.col_offset = 0
125 return ast.Expression(expr.value, lineno=0, col_offset = 0)
126
127 _OUT_BUFFER = []
128
129 def _display(*objs, **kwargs):
130 """
131 IPython.display compatibility wrapper.
132
133 Note: This only handles a single display.
134 """
135 _OUT_BUFFER.extend(list(objs))
136
137 def exec_with_return(
138 code: str,
139 global_context: Dict[str, Any] = None,
140 stdout: Any = None,
141 stderr: Any = None
142 ) -> Any:
143 """
144 Executes a code snippet and returns the resulting output of the
145 last line.
146
147 Arguments
148 ---------
149 code: str
150 The code to execute
151 global_context: Dict[str, Any]
152 The globals to inject into the execution context.
153 stdout: io.StringIO
154 The stream to redirect stdout to.
155 stderr: io.StringIO
156 The stream to redirect stderr to.
157
158 Returns
159 -------
160
161 The return value of the executed code.
162 """
163 global_context = global_context if global_context else globals()
164 global_context['display'] = _display
165 code_ast = ast.parse(code)
166
167 init_ast = copy.deepcopy(code_ast)
168 init_ast.body = code_ast.body[:-1]
169
170 last_ast = copy.deepcopy(code_ast)
171 last_ast.body = code_ast.body[-1:]
172
173 stdout = stdout or sys.stdout
174 stderr = stderr or sys.stderr
175 with redirect_stdout(stdout), redirect_stderr(stderr):
176 try:
177 exec(compile(init_ast, "<ast>", "exec"), global_context)
178 if not last_ast.body:
179 out = None
180 elif type(last_ast.body[0]) == ast.Expr:
181 out = eval(compile(_convert_expr(last_ast.body[0]), "<ast>", "eval"), global_context)
182 else:
183 exec(compile(last_ast, "<ast>", "exec"), global_context)
184 out = None
185 if code.strip().endswith(';'):
186 out = None
187 if _OUT_BUFFER and out is None:
188 out = _OUT_BUFFER[-1]
189 except Exception:
190 out = None
191 traceback.print_exc(file=stderr)
192 finally:
193 _OUT_BUFFER.clear()
194 return out
195
196 #---------------------------------------------------------------------
197 # MIME Render API
198 #---------------------------------------------------------------------
199
200 MIME_METHODS = {
201 "__repr__": "text/plain",
202 "_repr_html_": "text/html",
203 "_repr_markdown_": "text/markdown",
204 "_repr_svg_": "image/svg+xml",
205 "_repr_png_": "image/png",
206 "_repr_pdf_": "application/pdf",
207 "_repr_jpeg_": "image/jpeg",
208 "_repr_latex": "text/latex",
209 "_repr_json_": "application/json",
210 "_repr_javascript_": "application/javascript",
211 "savefig": "image/png",
212 "to_html": "text/html"
213 }
214
215 # Rendering function
216
217 def render_svg(value, meta, mime):
218 return value, 'text/html'
219
220 def render_image(value, meta, mime):
221 data = f"data:{mime};charset=utf-8;base64,{value}"
222 attrs = " ".join(['{k}="{v}"' for k, v in meta.items()])
223 return f'<img src="{data}" {attrs}</img>', 'text/html'
224
225 def render_javascript(value, meta, mime):
226 return f'<script>{value}</script>', 'text/html'
227
228 def render_markdown(value, meta, mime):
229 return (markdown.markdown(
230 value, extensions=["extra", "smarty", "codehilite"], output_format='html5'
231 ), 'text/html')
232
233 def render_pdf(value, meta, mime):
234 data = value.encode('utf-8')
235 base64_pdf = base64.b64encode(data).decode("utf-8")
236 src = f"data:application/pdf;base64,{base64_pdf}"
237 return f'<embed src="{src}" width="100%" height="100%" type="application/pdf">', 'text/html'
238
239 def identity(value, meta, mime):
240 return value, mime
241
242 MIME_RENDERERS = {
243 "image/png": render_image,
244 "image/jpeg": render_image,
245 "image/svg+xml": identity,
246 "application/json": identity,
247 "application/javascript": render_javascript,
248 "application/pdf": render_pdf,
249 "text/html": identity,
250 "text/markdown": render_markdown,
251 "text/plain": identity,
252 }
253
254 def eval_formatter(obj, print_method):
255 """
256 Evaluates a formatter method.
257 """
258 if print_method == "__repr__":
259 return repr(obj)
260 elif hasattr(obj, print_method):
261 if print_method == "savefig":
262 buf = io.BytesIO()
263 obj.savefig(buf, format="png")
264 buf.seek(0)
265 return base64.b64encode(buf.read()).decode("utf-8")
266 return getattr(obj, print_method)()
267 elif print_method == "_repr_mimebundle_":
268 return {}, {}
269 return None
270
271 def format_mime(obj):
272 """
273 Formats object using _repr_x_ methods.
274 """
275 if isinstance(obj, str):
276 return escape(obj), "text/plain"
277 mimebundle = eval_formatter(obj, "_repr_mimebundle_")
278 if isinstance(mimebundle, tuple):
279 format_dict, _ = mimebundle
280 else:
281 format_dict = mimebundle
282
283 output, not_available = None, []
284 for method, mime_type in reversed(list(MIME_METHODS.items())):
285 if mime_type in format_dict:
286 output = format_dict[mime_type]
287 elif isinstance(obj, type) and method != '__repr__':
288 output = None
289 else:
290 output = eval_formatter(obj, method)
291
292 if output is None:
293 continue
294 elif mime_type not in MIME_RENDERERS:
295 not_available.append(mime_type)
296 continue
297 break
298 if output is None:
299 output = repr(output)
300 mime_type = "text/plain"
301 elif isinstance(output, tuple):
302 output, meta = output
303 else:
304 meta = {}
305 content, mime_type = MIME_RENDERERS[mime_type](output, meta, mime_type)
306 if mime_type == 'text/plain':
307 content = escape(content)
308 return content, mime_type
309
[end of panel/io/mime_render.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/panel/io/mime_render.py b/panel/io/mime_render.py
--- a/panel/io/mime_render.py
+++ b/panel/io/mime_render.py
@@ -34,6 +34,8 @@
#---------------------------------------------------------------------
def _stdlibs():
+ if sys.version_info[:2] >= (3, 10):
+ return sys.stdlib_module_names
env_dir = str(pathlib.Path(sys.executable).parent.parent)
modules = list(sys.builtin_module_names)
for m in pkgutil.iter_modules():
| {"golden_diff": "diff --git a/panel/io/mime_render.py b/panel/io/mime_render.py\n--- a/panel/io/mime_render.py\n+++ b/panel/io/mime_render.py\n@@ -34,6 +34,8 @@\n #---------------------------------------------------------------------\n \n def _stdlibs():\n+ if sys.version_info[:2] >= (3, 10):\n+ return sys.stdlib_module_names\n env_dir = str(pathlib.Path(sys.executable).parent.parent)\n modules = list(sys.builtin_module_names)\n for m in pkgutil.iter_modules():\n", "issue": "find_requirements includes datetime and random\nI'm on the current `main` branch of Panel.\r\n\r\nWhen I use `find_requirements` like below I get \"packages\" included like `datetime` and `random` from the `stdlib`.\r\n\r\n```python\r\nfrom panel.io.convert import find_requirements\r\ncode = \"\"\"\r\nimport panel as pn\r\nimport pandas as pd\r\n\r\nimport random\r\nfrom datetime import datetime, timedelta\r\n\"\"\"\r\n\r\nrequirements = find_requirements(code)\r\nprint(requirements)\r\n```\r\n\r\n```bash\r\n$ python app.py\r\n['datetime', 'pandas', 'panel', 'random']\r\n```\n", "before_files": [{"content": "\"\"\"\nUtilities for executing Python code and rendering the resulting output\nusing a similar MIME-type based rendering system as implemented by\nIPython.\n\nAttempts to limit the actual MIME types that have to be rendered on\nto a minimum simplifying frontend implementation:\n\n - application/bokeh: Model JSON representation\n - text/plain: HTML escaped string output\n - text/html: HTML code to insert into the DOM\n\"\"\"\n\nfrom __future__ import annotations\n\nimport ast\nimport base64\nimport copy\nimport io\nimport pathlib\nimport pkgutil\nimport sys\nimport traceback\n\nfrom contextlib import redirect_stderr, redirect_stdout\nfrom html import escape\nfrom textwrap import dedent\nfrom typing import Any, Dict, List\n\nimport markdown\n\n#---------------------------------------------------------------------\n# Import API\n#---------------------------------------------------------------------\n\ndef _stdlibs():\n env_dir = str(pathlib.Path(sys.executable).parent.parent)\n modules = list(sys.builtin_module_names)\n for m in pkgutil.iter_modules():\n mpath = getattr(m.module_finder, 'path', '')\n if mpath.startswith(env_dir) and 'site-packages' not in mpath:\n modules.append(m.name)\n return modules\n\n_STDLIBS = _stdlibs()\n_PACKAGE_MAP = {\n 'sklearn': 'scikit-learn',\n 'transformers_js': 'transformers-js-py',\n}\n_IGNORED_PKGS = ['js', 'pyodide']\n_PANDAS_AUTODETECT = ['bokeh.sampledata', 'as_frame']\n\ndef find_requirements(code: str) -> List[str]:\n \"\"\"\n Finds the packages required in a string of code.\n\n Parameters\n ----------\n code : str\n the Python code to run.\n\n Returns\n -------\n ``List[str]``\n A list of package names that are to be installed for the code to be able to run.\n\n Examples\n --------\n >>> code = \"import numpy as np; import scipy.stats\"\n >>> find_imports(code)\n ['numpy', 'scipy']\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n imports = set()\n for node in ast.walk(mod):\n if isinstance(node, ast.Import):\n for name in node.names:\n node_name = name.name\n imports.add(node_name.split(\".\")[0])\n elif isinstance(node, ast.ImportFrom):\n module_name = node.module\n if module_name is None:\n continue\n imports.add(module_name.split(\".\")[0])\n\n packages = []\n for pkg in sorted(imports):\n pkg = _PACKAGE_MAP.get(pkg, pkg)\n if pkg in _STDLIBS:\n continue\n elif isinstance(pkg, list):\n packages.extend(pkg)\n 
else:\n packages.append(pkg)\n if any(pdd in code for pdd in _PANDAS_AUTODETECT) and 'pandas' not in packages:\n packages.append('pandas')\n return [pkg for pkg in packages if pkg not in _IGNORED_PKGS]\n\n#---------------------------------------------------------------------\n# Execution API\n#---------------------------------------------------------------------\n\nclass WriteCallbackStream(io.StringIO):\n\n def __init__(self, on_write=None, escape=True):\n self._onwrite = on_write\n self._escape = escape\n super().__init__()\n\n def write(self, s):\n if self._onwrite:\n self._onwrite(escape(s) if self._escape else s)\n super().write(s)\n\ndef _convert_expr(expr: ast.Expr) -> ast.Expression:\n \"\"\"\n Converts an ast.Expr to and ast.Expression that can be compiled\n and evaled.\n \"\"\"\n expr.lineno = 0\n expr.col_offset = 0\n return ast.Expression(expr.value, lineno=0, col_offset = 0)\n\n_OUT_BUFFER = []\n\ndef _display(*objs, **kwargs):\n \"\"\"\n IPython.display compatibility wrapper.\n\n Note: This only handles a single display.\n \"\"\"\n _OUT_BUFFER.extend(list(objs))\n\ndef exec_with_return(\n code: str,\n global_context: Dict[str, Any] = None,\n stdout: Any = None,\n stderr: Any = None\n) -> Any:\n \"\"\"\n Executes a code snippet and returns the resulting output of the\n last line.\n\n Arguments\n ---------\n code: str\n The code to execute\n global_context: Dict[str, Any]\n The globals to inject into the execution context.\n stdout: io.StringIO\n The stream to redirect stdout to.\n stderr: io.StringIO\n The stream to redirect stderr to.\n\n Returns\n -------\n\n The return value of the executed code.\n \"\"\"\n global_context = global_context if global_context else globals()\n global_context['display'] = _display\n code_ast = ast.parse(code)\n\n init_ast = copy.deepcopy(code_ast)\n init_ast.body = code_ast.body[:-1]\n\n last_ast = copy.deepcopy(code_ast)\n last_ast.body = code_ast.body[-1:]\n\n stdout = stdout or sys.stdout\n stderr = stderr or sys.stderr\n with redirect_stdout(stdout), redirect_stderr(stderr):\n try:\n exec(compile(init_ast, \"<ast>\", \"exec\"), global_context)\n if not last_ast.body:\n out = None\n elif type(last_ast.body[0]) == ast.Expr:\n out = eval(compile(_convert_expr(last_ast.body[0]), \"<ast>\", \"eval\"), global_context)\n else:\n exec(compile(last_ast, \"<ast>\", \"exec\"), global_context)\n out = None\n if code.strip().endswith(';'):\n out = None\n if _OUT_BUFFER and out is None:\n out = _OUT_BUFFER[-1]\n except Exception:\n out = None\n traceback.print_exc(file=stderr)\n finally:\n _OUT_BUFFER.clear()\n return out\n\n#---------------------------------------------------------------------\n# MIME Render API\n#---------------------------------------------------------------------\n\nMIME_METHODS = {\n \"__repr__\": \"text/plain\",\n \"_repr_html_\": \"text/html\",\n \"_repr_markdown_\": \"text/markdown\",\n \"_repr_svg_\": \"image/svg+xml\",\n \"_repr_png_\": \"image/png\",\n \"_repr_pdf_\": \"application/pdf\",\n \"_repr_jpeg_\": \"image/jpeg\",\n \"_repr_latex\": \"text/latex\",\n \"_repr_json_\": \"application/json\",\n \"_repr_javascript_\": \"application/javascript\",\n \"savefig\": \"image/png\",\n \"to_html\": \"text/html\"\n}\n\n# Rendering function\n\ndef render_svg(value, meta, mime):\n return value, 'text/html'\n\ndef render_image(value, meta, mime):\n data = f\"data:{mime};charset=utf-8;base64,{value}\"\n attrs = \" \".join(['{k}=\"{v}\"' for k, v in meta.items()])\n return f'<img src=\"{data}\" {attrs}</img>', 'text/html'\n\ndef 
render_javascript(value, meta, mime):\n return f'<script>{value}</script>', 'text/html'\n\ndef render_markdown(value, meta, mime):\n return (markdown.markdown(\n value, extensions=[\"extra\", \"smarty\", \"codehilite\"], output_format='html5'\n ), 'text/html')\n\ndef render_pdf(value, meta, mime):\n data = value.encode('utf-8')\n base64_pdf = base64.b64encode(data).decode(\"utf-8\")\n src = f\"data:application/pdf;base64,{base64_pdf}\"\n return f'<embed src=\"{src}\" width=\"100%\" height=\"100%\" type=\"application/pdf\">', 'text/html'\n\ndef identity(value, meta, mime):\n return value, mime\n\nMIME_RENDERERS = {\n \"image/png\": render_image,\n \"image/jpeg\": render_image,\n \"image/svg+xml\": identity,\n \"application/json\": identity,\n \"application/javascript\": render_javascript,\n \"application/pdf\": render_pdf,\n \"text/html\": identity,\n \"text/markdown\": render_markdown,\n \"text/plain\": identity,\n}\n\ndef eval_formatter(obj, print_method):\n \"\"\"\n Evaluates a formatter method.\n \"\"\"\n if print_method == \"__repr__\":\n return repr(obj)\n elif hasattr(obj, print_method):\n if print_method == \"savefig\":\n buf = io.BytesIO()\n obj.savefig(buf, format=\"png\")\n buf.seek(0)\n return base64.b64encode(buf.read()).decode(\"utf-8\")\n return getattr(obj, print_method)()\n elif print_method == \"_repr_mimebundle_\":\n return {}, {}\n return None\n\ndef format_mime(obj):\n \"\"\"\n Formats object using _repr_x_ methods.\n \"\"\"\n if isinstance(obj, str):\n return escape(obj), \"text/plain\"\n mimebundle = eval_formatter(obj, \"_repr_mimebundle_\")\n if isinstance(mimebundle, tuple):\n format_dict, _ = mimebundle\n else:\n format_dict = mimebundle\n\n output, not_available = None, []\n for method, mime_type in reversed(list(MIME_METHODS.items())):\n if mime_type in format_dict:\n output = format_dict[mime_type]\n elif isinstance(obj, type) and method != '__repr__':\n output = None\n else:\n output = eval_formatter(obj, method)\n\n if output is None:\n continue\n elif mime_type not in MIME_RENDERERS:\n not_available.append(mime_type)\n continue\n break\n if output is None:\n output = repr(output)\n mime_type = \"text/plain\"\n elif isinstance(output, tuple):\n output, meta = output\n else:\n meta = {}\n content, mime_type = MIME_RENDERERS[mime_type](output, meta, mime_type)\n if mime_type == 'text/plain':\n content = escape(content)\n return content, mime_type\n", "path": "panel/io/mime_render.py"}]} | 3,607 | 121 |
gh_patches_debug_5667 | rasdani/github-patches | git_diff | ivy-llc__ivy-16244 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
softshrink: add the `softshrink` activation function to the Paddle frontend.
</issue>
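For context, softshrink is usually defined as softshrink(x) = x - threshold for x > threshold, x + threshold for x < -threshold, and 0 otherwise. A sketch of how it could be written with ivy primitives, following the style of the other wrappers in this file (treat the exact signature as an assumption, not the final API):
```python
@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def softshrink(x, /, *, threshold=0.5, name=None):
    # shrink values outside [-threshold, threshold] toward zero, zero out the rest
    low = ivy.where(ivy.less(x, -threshold), ivy.add(x, threshold), 0)
    up = ivy.where(ivy.greater(x, threshold), ivy.subtract(x, threshold), 0)
    return ivy.astype(ivy.add(low, up), x.dtype)
```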
<code>
[start of ivy/functional/frontends/paddle/nn/functional/activation.py]
1 # local
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
5 from ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh
6 from ivy.functional.frontends.paddle.tensor.math import (
7 log_softmax as paddle_log_softmax,
8 )
9
10
11 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
12 @to_ivy_arrays_and_back
13 def selu(
14 x,
15 /,
16 *,
17 alpha=1.6732632423543772848170429916717,
18 scale=1.0507009873554804934193349852946,
19 name=None,
20 ):
21 if scale <= 1.0:
22 raise ValueError(f"The scale must be greater than 1.0. Received: {scale}.")
23
24 if alpha < 0:
25 raise ValueError(f"The alpha must be no less than zero. Received: {alpha}.")
26
27 ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))
28 arr = scale * ret
29 return ivy.astype(arr, x.dtype)
30
31
32 tanh = paddle_tanh
33 log_softmax = paddle_log_softmax
34
35
36 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
37 @to_ivy_arrays_and_back
38 def hardshrink(x, threshold=0.5, name=None):
39 mask = ivy.logical_or(ivy.greater(x, threshold), ivy.less(x, -threshold))
40 return ivy.where(mask, x, 0.0)
41
42
43 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
44 @to_ivy_arrays_and_back
45 def hardtanh(
46 x,
47 /,
48 *,
49 min=-1.0,
50 max=1.0,
51 name=None,
52 ):
53 less = ivy.where(ivy.less(x, min), min, x)
54 ret = ivy.where(ivy.greater(x, max), max, less).astype(x.dtype)
55 return ret
56
57
58 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
59 @to_ivy_arrays_and_back
60 def gelu(x, approximate=False, name=None):
61 return ivy.gelu(x, approximate=approximate)
62
63
64 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
65 @to_ivy_arrays_and_back
66 def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
67 ret = ivy.minimum(ivy.maximum(ivy.add(ivy.multiply(x, slope), offset), 0), 1)
68 return ret
69
70
71 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
72 @to_ivy_arrays_and_back
73 def relu6(x, name=None):
74 return ivy.relu6(x)
75
[end of ivy/functional/frontends/paddle/nn/functional/activation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/nn/functional/activation.py b/ivy/functional/frontends/paddle/nn/functional/activation.py
--- a/ivy/functional/frontends/paddle/nn/functional/activation.py
+++ b/ivy/functional/frontends/paddle/nn/functional/activation.py
@@ -72,3 +72,18 @@
@to_ivy_arrays_and_back
def relu6(x, name=None):
return ivy.relu6(x)
+
+
+@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
+@to_ivy_arrays_and_back
+def softshrink(
+ x,
+ /,
+ *,
+ threshold=0.5,
+ name=None,
+):
+ low = ivy.where(ivy.less(x, -threshold), ivy.add(x, threshold), 0)
+ up = ivy.where(ivy.greater(x, threshold), ivy.subtract(x, threshold), 0)
+ add = ivy.add(low, up)
+ return ivy.astype(add, x.dtype)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/activation.py b/ivy/functional/frontends/paddle/nn/functional/activation.py\n--- a/ivy/functional/frontends/paddle/nn/functional/activation.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/activation.py\n@@ -72,3 +72,18 @@\n @to_ivy_arrays_and_back\n def relu6(x, name=None):\n return ivy.relu6(x)\n+\n+\n+@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def softshrink(\n+ x,\n+ /,\n+ *,\n+ threshold=0.5,\n+ name=None,\n+):\n+ low = ivy.where(ivy.less(x, -threshold), ivy.add(x, threshold), 0)\n+ up = ivy.where(ivy.greater(x, threshold), ivy.subtract(x, threshold), 0)\n+ add = ivy.add(low, up)\n+ return ivy.astype(add, x.dtype)\n", "issue": "softshrink\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh\nfrom ivy.functional.frontends.paddle.tensor.math import (\n log_softmax as paddle_log_softmax,\n)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef selu(\n x,\n /,\n *,\n alpha=1.6732632423543772848170429916717,\n scale=1.0507009873554804934193349852946,\n name=None,\n):\n if scale <= 1.0:\n raise ValueError(f\"The scale must be greater than 1.0. Received: {scale}.\")\n\n if alpha < 0:\n raise ValueError(f\"The alpha must be no less than zero. Received: {alpha}.\")\n\n ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))\n arr = scale * ret\n return ivy.astype(arr, x.dtype)\n\n\ntanh = paddle_tanh\nlog_softmax = paddle_log_softmax\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardshrink(x, threshold=0.5, name=None):\n mask = ivy.logical_or(ivy.greater(x, threshold), ivy.less(x, -threshold))\n return ivy.where(mask, x, 0.0)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardtanh(\n x,\n /,\n *,\n min=-1.0,\n max=1.0,\n name=None,\n):\n less = ivy.where(ivy.less(x, min), min, x)\n ret = ivy.where(ivy.greater(x, max), max, less).astype(x.dtype)\n return ret\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gelu(x, approximate=False, name=None):\n return ivy.gelu(x, approximate=approximate)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):\n ret = ivy.minimum(ivy.maximum(ivy.add(ivy.multiply(x, slope), offset), 0), 1)\n return ret\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef relu6(x, name=None):\n return ivy.relu6(x)\n", "path": "ivy/functional/frontends/paddle/nn/functional/activation.py"}]} | 1,438 | 254 |
gh_patches_debug_53637 | rasdani/github-patches | git_diff | Kinto__kinto-797 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
init fails on osx at the backend selection step
I followed the instructions given [here](https://kinto.readthedocs.io/en/stable/tutorials/install.html#from-sources), but when asked:
```
$ Select the backend you would like to use: (1 - postgresql, 2 - redis, default - memory)
```
entering `1` and `2` leads to the following error:
```
Traceback (most recent call last):
File ".venv/bin/kinto", line 11, in <module>
load_entry_point('kinto', 'console_scripts', 'kinto')()
File "/work/git/kinto/kinto/__main__.py", line 108, in main
answer = input(prompt).strip()
AttributeError: 'int' object has no attribute 'strip'
```
and entering nothing (just pressing Enter) leads to the following error:
```
Traceback (most recent call last):
File ".venv/bin/kinto", line 11, in <module>
load_entry_point('kinto', 'console_scripts', 'kinto')()
File "/work/git/kinto/kinto/__main__.py", line 108, in main
answer = input(prompt).strip()
File "<string>", line 0
^
SyntaxError: unexpected EOF while parsing
```
It appears that the code expects a `string`, but `input()` here evaluates what is typed (an `int` for `1`/`2`, a `SyntaxError` for empty input), therefore failing at the `.strip()` call [here](https://github.com/Kinto/kinto/blob/master/kinto/__main__.py#L108).
---
Entering `""`, `"1"` and `"2"` (quotes included) works. I'm assuming that's not the way it's designed to be used?
</issue>
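One portable workaround is to read input through `six.moves`, which maps to `raw_input` on Python 2 and the builtin `input` on Python 3, so the call always returns a string (this assumes `six` is available as a dependency):
```python
from six.moves import input  # raw_input on Python 2, builtin input on Python 3

prompt = ("Select the backend you would like to use: "
          "(1 - postgresql, 2 - redis, default - memory) ")
answer = input(prompt).strip()  # always a str, so .strip() is safe
```
Alternatively the CLI could require Python 3 outright, but the shim keeps the prompt working on both interpreters.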
<code>
[start of kinto/__main__.py]
1 from __future__ import print_function
2 import argparse
3 import os
4 import sys
5 import logging
6 import logging.config
7
8 from kinto.core import scripts
9 from pyramid.scripts import pserve
10 from pyramid.paster import bootstrap
11 from kinto import __version__
12 from kinto.config import init
13
14 DEFAULT_CONFIG_FILE = 'config/kinto.ini'
15 DEFAULT_PORT = 8888
16 DEFAULT_LOG_LEVEL = logging.INFO
17 DEFAULT_LOG_FORMAT = "%(levelname)-5.5s %(message)s"
18
19
20 def main(args=None):
21 """The main routine."""
22 if args is None:
23 args = sys.argv[1:]
24
25 parser = argparse.ArgumentParser(description="Kinto Command-Line "
26 "Interface")
27 # XXX: deprecate this option, unnatural as first argument.
28 parser.add_argument('--ini',
29 help='Application configuration file',
30 dest='ini_file',
31 required=False,
32 default=DEFAULT_CONFIG_FILE)
33
34 parser.add_argument('-q', '--quiet', action='store_const',
35 const=logging.CRITICAL, dest='verbosity',
36 help='Show only critical errors.')
37
38 parser.add_argument('--debug', action='store_const',
39 const=logging.DEBUG, dest='verbosity',
40 help='Show all messages, including debug messages.')
41
42 commands = ('init', 'start', 'migrate', 'delete-collection', 'version')
43 subparsers = parser.add_subparsers(title='subcommands',
44 description='Main Kinto CLI commands',
45 dest='subcommand',
46 help="Choose and run with --help")
47 subparsers.required = True
48
49 for command in commands:
50 subparser = subparsers.add_parser(command)
51 subparser.set_defaults(which=command)
52
53 if command == 'init':
54 subparser.add_argument('--backend',
55 help='{memory,redis,postgresql}',
56 dest='backend',
57 required=False,
58 default=None)
59 elif command == 'migrate':
60 subparser.add_argument('--dry-run',
61 action='store_true',
62 help='Simulate the migration operations '
63 'and show information',
64 dest='dry_run',
65 required=False,
66 default=False)
67 elif command == 'delete-collection':
68 subparser.add_argument('--bucket',
69 help='The bucket where the collection '
70 'belongs to.',
71 required=True)
72 subparser.add_argument('--collection',
73 help='The collection to remove.',
74 required=True)
75
76 elif command == 'start':
77 subparser.add_argument('--reload',
78 action='store_true',
79 help='Restart when code or config changes',
80 required=False,
81 default=False)
82 subparser.add_argument('--port',
83 type=int,
84 help='Listening port number',
85 required=False,
86 default=DEFAULT_PORT)
87
88 # Parse command-line arguments
89 parsed_args = vars(parser.parse_args(args))
90
91 config_file = parsed_args['ini_file']
92 which_command = parsed_args['which']
93
94 # Initialize logging from
95 level = parsed_args.get('verbosity') or DEFAULT_LOG_LEVEL
96 logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)
97
98 if which_command == 'init':
99 if os.path.exists(config_file):
100 print("%s already exists." % config_file, file=sys.stderr)
101 return 1
102
103 backend = parsed_args['backend']
104 if not backend:
105 while True:
106 prompt = ("Select the backend you would like to use: "
107 "(1 - postgresql, 2 - redis, default - memory) ")
108 answer = input(prompt).strip()
109 try:
110 backends = {"1": "postgresql", "2": "redis", "": "memory"}
111 backend = backends[answer]
112 break
113 except KeyError:
114 pass
115
116 init(config_file, backend)
117
118 # Install postgresql libraries if necessary
119 if backend == "postgresql":
120 try:
121 import psycopg2 # NOQA
122 except ImportError:
123 import pip
124 pip.main(['install', "kinto[postgresql]"])
125 elif backend == "redis":
126 try:
127 import kinto_redis # NOQA
128 except ImportError:
129 import pip
130 pip.main(['install', "kinto[redis]"])
131
132 elif which_command == 'migrate':
133 dry_run = parsed_args['dry_run']
134 env = bootstrap(config_file)
135 scripts.migrate(env, dry_run=dry_run)
136
137 elif which_command == 'delete-collection':
138 env = bootstrap(config_file)
139 return scripts.delete_collection(env,
140 parsed_args['bucket'],
141 parsed_args['collection'])
142
143 elif which_command == 'start':
144 pserve_argv = ['pserve', config_file]
145 if parsed_args['reload']:
146 pserve_argv.append('--reload')
147 pserve_argv.append('http_port=%s' % parsed_args['port'])
148 pserve.main(pserve_argv)
149
150 elif which_command == 'version':
151 print(__version__)
152
153 return 0
154
[end of kinto/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/__main__.py b/kinto/__main__.py
--- a/kinto/__main__.py
+++ b/kinto/__main__.py
@@ -4,6 +4,7 @@
import sys
import logging
import logging.config
+from six.moves import input
from kinto.core import scripts
from pyramid.scripts import pserve
| {"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -4,6 +4,7 @@\n import sys\n import logging\n import logging.config\n+from six.moves import input\n \n from kinto.core import scripts\n from pyramid.scripts import pserve\n", "issue": "init fails on osx at the backend selection step\nI followed the instructions given [here](https://kinto.readthedocs.io/en/stable/tutorials/install.html#from-sources), but when asked:\n\n```\n$ Select the backend you would like to use: (1 - postgresql, 2 - redis, default - memory)\n```\n\nentering `1` and `2` leads to the following error:\n\n```\nTraceback (most recent call last):\n File \".venv/bin/kinto\", line 11, in <module>\n load_entry_point('kinto', 'console_scripts', 'kinto')()\n File \"/work/git/kinto/kinto/__main__.py\", line 108, in main\n answer = input(prompt).strip()\nAttributeError: 'int' object has no attribute 'strip'\n```\n\nand entering nothing + enter will lead to the following error.\n\n```\nTraceback (most recent call last):\n File \".venv/bin/kinto\", line 11, in <module>\n load_entry_point('kinto', 'console_scripts', 'kinto')()\n File \"/work/git/kinto/kinto/__main__.py\", line 108, in main\n answer = input(prompt).strip()\n File \"<string>\", line 0\n\n ^\nSyntaxError: unexpected EOF while parsing\n```\n\nIt appears that the code expects a `string` but getting a number and null, therefore failing on the `.strip()` call [here](https://github.com/Kinto/kinto/blob/master/kinto/__main__.py#L108).\n\n---\n\nEntering `\"\"`, `\"1\"` and `\"2\"` works. I'm assuming that's not the way it's designed to be? \n\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport logging\nimport logging.config\n\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nDEFAULT_CONFIG_FILE = 'config/kinto.ini'\nDEFAULT_PORT = 8888\nDEFAULT_LOG_LEVEL = logging.INFO\nDEFAULT_LOG_FORMAT = \"%(levelname)-5.5s %(message)s\"\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto Command-Line \"\n \"Interface\")\n # XXX: deprecate this option, unnatural as first argument.\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=DEFAULT_CONFIG_FILE)\n\n parser.add_argument('-q', '--quiet', action='store_const',\n const=logging.CRITICAL, dest='verbosity',\n help='Show only critical errors.')\n\n parser.add_argument('--debug', action='store_const',\n const=logging.DEBUG, dest='verbosity',\n help='Show all messages, including debug messages.')\n\n commands = ('init', 'start', 'migrate', 'delete-collection', 'version')\n subparsers = parser.add_subparsers(title='subcommands',\n description='Main Kinto CLI commands',\n dest='subcommand',\n help=\"Choose and run with --help\")\n subparsers.required = True\n\n for command in commands:\n subparser = subparsers.add_parser(command)\n subparser.set_defaults(which=command)\n\n if command == 'init':\n subparser.add_argument('--backend',\n help='{memory,redis,postgresql}',\n dest='backend',\n required=False,\n default=None)\n elif command == 'migrate':\n subparser.add_argument('--dry-run',\n action='store_true',\n help='Simulate the migration operations '\n 'and show information',\n dest='dry_run',\n required=False,\n 
default=False)\n elif command == 'delete-collection':\n subparser.add_argument('--bucket',\n help='The bucket where the collection '\n 'belongs to.',\n required=True)\n subparser.add_argument('--collection',\n help='The collection to remove.',\n required=True)\n\n elif command == 'start':\n subparser.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n subparser.add_argument('--port',\n type=int,\n help='Listening port number',\n required=False,\n default=DEFAULT_PORT)\n\n # Parse command-line arguments\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n which_command = parsed_args['which']\n\n # Initialize logging from\n level = parsed_args.get('verbosity') or DEFAULT_LOG_LEVEL\n logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)\n\n if which_command == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n elif backend == \"redis\":\n try:\n import kinto_redis # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[redis]\"])\n\n elif which_command == 'migrate':\n dry_run = parsed_args['dry_run']\n env = bootstrap(config_file)\n scripts.migrate(env, dry_run=dry_run)\n\n elif which_command == 'delete-collection':\n env = bootstrap(config_file)\n return scripts.delete_collection(env,\n parsed_args['bucket'],\n parsed_args['collection'])\n\n elif which_command == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n\n elif which_command == 'version':\n print(__version__)\n\n return 0\n", "path": "kinto/__main__.py"}]} | 2,286 | 78 |
gh_patches_debug_59317 | rasdani/github-patches | git_diff | chainer__chainer-1178 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HuberLoss's backward() should not ignore gy
`HuberLoss`'s `backward()` ignores `gy`, so any computation applied after `HuberLoss` has no effect on the gradients it backpropagates. I think such behavior is not correct.
``` python
>>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32))
>>> t = chainer.Variable(np.ones((1,1), dtype=np.float32))
>>> F.huber_loss(x, t, 1.0).backward()
>>> x.grad
array([[-1.]], dtype=float32)
```
``` python
>>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32))
>>> t = chainer.Variable(np.ones((1,1), dtype=np.float32))
>>> (F.huber_loss(x, t, 1.0) * 0).backward() # Multiply the loss by zero
>>> x.grad
array([[-1.]], dtype=float32)
```
</issue>
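A minimal sketch of a `backward()` that scales the per-element gradient by the incoming `gy` instead of discarding it (the reshape broadcasts the per-sample gradient over the feature axes; an illustration, not necessarily the project's final fix):
```python
def backward(self, inputs, gy):
    xp = cuda.get_array_module(*inputs)
    mask = xp.abs(self.diff) <= self.delta
    gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))
    # multiply by the upstream gradient so that e.g. `loss * 0` really zeroes grads
    gx = gy[0].reshape(gy[0].shape + (1,) * (self.diff.ndim - 1)) * gx
    return gx, -gx
```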
<code>
[start of chainer/functions/loss/huber_loss.py]
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7
8 class HuberLoss(function.Function):
9
10 def __init__(self, delta):
11 self.delta = delta
12
13 def check_type_forward(self, in_types):
14 type_check.expect(in_types.size() == 2)
15 type_check.expect(
16 in_types[0].dtype == numpy.float32,
17 in_types[1].dtype == numpy.float32,
18 in_types[0].shape == in_types[1].shape
19 )
20
21 def forward(self, inputs):
22 xp = cuda.get_array_module(*inputs)
23 x0, x1 = inputs
24 self.diff = x0 - x1
25 y = xp.square(self.diff)
26 mask = y > (self.delta ** 2)
27 y -= mask * xp.square(abs(self.diff) - self.delta)
28 y *= 0.5
29 return y.sum(axis=1),
30
31 def backward(self, inputs, gy):
32 xp = cuda.get_array_module(*inputs)
33 mask = xp.abs(self.diff) <= self.delta
34 gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))
35 return gx, -gx
36
37
38 def huber_loss(x, t, delta):
39 """Loss function which is less sensitive to outliers in data than MSE.
40
41 .. math::
42 a = x - t
43
44 and
45
46 .. math::
47 L_{\\delta}(a) = \\left \\{ \\begin{array}{cc}
48 \\frac{1}{2} a^2 & {\\rm if~|a| \\leq \\delta} \\\\
49 \\delta (|a| - \\frac{1}{2} \\delta) & {\\rm otherwise,}
50 \\end{array} \\right.
51
52 Args:
53 x (~chainer.Variable): Input variable.
54 The shape of ``x`` should be (:math:`N`, :math:`K`).
55 t (~chainer.Variable): Target variable for regression.
56 The shape of ``t`` should be (:math:`N`, :math:`K`).
57 delta (float): Constant variable for huber loss function
58 as used in definition.
59
60 Returns:
61 ~chainer.Variable: A variable object holding a scalar array of the
62 huber loss :math:`L_{\\delta}`.
63
64 See:
65 `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.
66
67 """
68 return HuberLoss(delta=delta)(x, t)
69
[end of chainer/functions/loss/huber_loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/loss/huber_loss.py b/chainer/functions/loss/huber_loss.py
--- a/chainer/functions/loss/huber_loss.py
+++ b/chainer/functions/loss/huber_loss.py
@@ -31,7 +31,8 @@
def backward(self, inputs, gy):
xp = cuda.get_array_module(*inputs)
mask = xp.abs(self.diff) <= self.delta
- gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))
+ gx = gy[0].reshape(gy[0].shape + (1,) * (self.diff.ndim - 1)) * \
+ xp.where(mask, self.diff, self.delta * xp.sign(self.diff))
return gx, -gx
| {"golden_diff": "diff --git a/chainer/functions/loss/huber_loss.py b/chainer/functions/loss/huber_loss.py\n--- a/chainer/functions/loss/huber_loss.py\n+++ b/chainer/functions/loss/huber_loss.py\n@@ -31,7 +31,8 @@\n def backward(self, inputs, gy):\n xp = cuda.get_array_module(*inputs)\n mask = xp.abs(self.diff) <= self.delta\n- gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n+ gx = gy[0].reshape(gy[0].shape + (1,) * (self.diff.ndim - 1)) * \\\n+ xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n return gx, -gx\n", "issue": "HuberLoss's backward() should not ignore gy\n`HuberLoss`'s `backward()` ignores `gy`, so any computation after `HuberLoss` has no effect for gradients backpropagated by `HuberLoss`. I think such a behavior is not correct.\n\n``` python\n>>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32))\n>>> t = chainer.Variable(np.ones((1,1), dtype=np.float32))\n>>> F.huber_loss(x, t, 1.0).backward()\n>>> x.grad\narray([[-1.]], dtype=float32)\n```\n\n``` python\n>>> x = chainer.Variable(np.zeros((1,1), dtype=np.float32))\n>>> t = chainer.Variable(np.ones((1,1), dtype=np.float32))\n>>> (F.huber_loss(x, t, 1.0) * 0).backward() # Multiply the loss by zero\n>>> x.grad\narray([[-1.]], dtype=float32)\n```\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass HuberLoss(function.Function):\n\n def __init__(self, delta):\n self.delta = delta\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(\n in_types[0].dtype == numpy.float32,\n in_types[1].dtype == numpy.float32,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n x0, x1 = inputs\n self.diff = x0 - x1\n y = xp.square(self.diff)\n mask = y > (self.delta ** 2)\n y -= mask * xp.square(abs(self.diff) - self.delta)\n y *= 0.5\n return y.sum(axis=1),\n\n def backward(self, inputs, gy):\n xp = cuda.get_array_module(*inputs)\n mask = xp.abs(self.diff) <= self.delta\n gx = xp.where(mask, self.diff, self.delta * xp.sign(self.diff))\n return gx, -gx\n\n\ndef huber_loss(x, t, delta):\n \"\"\"Loss function which is less sensitive to outliers in data than MSE.\n\n .. math::\n a = x - t\n\n and\n\n .. math::\n L_{\\\\delta}(a) = \\\\left \\\\{ \\\\begin{array}{cc}\n \\\\frac{1}{2} a^2 & {\\\\rm if~|a| \\\\leq \\\\delta} \\\\\\\\\n \\\\delta (|a| - \\\\frac{1}{2} \\\\delta) & {\\\\rm otherwise,}\n \\\\end{array} \\\\right.\n\n Args:\n x (~chainer.Variable): Input variable.\n The shape of ``x`` should be (:math:`N`, :math:`K`).\n t (~chainer.Variable): Target variable for regression.\n The shape of ``t`` should be (:math:`N`, :math:`K`).\n delta (float): Constant variable for huber loss function\n as used in definition.\n\n Returns:\n ~chainer.Variable: A variable object holding a scalar array of the\n huber loss :math:`L_{\\\\delta}`.\n\n See:\n `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.\n\n \"\"\"\n return HuberLoss(delta=delta)(x, t)\n", "path": "chainer/functions/loss/huber_loss.py"}]} | 1,446 | 164 |
gh_patches_debug_36731 | rasdani/github-patches | git_diff | google__flax-3385 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve Early Stopping API
See discussion in #2090
</issue>
<code>
[start of flax/training/early_stopping.py]
1 # Copyright 2023 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Early stopping."""
16
17 import math
18 from flax import struct
19
20
21 class EarlyStopping(struct.PyTreeNode):
22 """Early stopping to avoid overfitting during training.
23
24 The following example stops training early if the difference between losses
25 recorded in the current epoch and previous epoch is less than 1e-3
26 consecutively for 2 times::
27
28 early_stop = EarlyStopping(min_delta=1e-3, patience=2)
29 for epoch in range(1, num_epochs+1):
30 rng, input_rng = jax.random.split(rng)
31 optimizer, train_metrics = train_epoch(
32 optimizer, train_ds, config.batch_size, epoch, input_rng)
33 _, early_stop = early_stop.update(train_metrics['loss'])
34 if early_stop.should_stop:
35 print('Met early stopping criteria, breaking...')
36 break
37
38 Attributes:
39 min_delta: Minimum delta between updates to be considered an
40 improvement.
41 patience: Number of steps of no improvement before stopping.
42 best_metric: Current best metric value.
43 patience_count: Number of steps since last improving update.
44 should_stop: Whether the training loop should stop to avoid
45 overfitting.
46 """
47
48 min_delta: float = 0
49 patience: int = 0
50 best_metric: float = float('inf')
51 patience_count: int = 0
52 should_stop: bool = False
53
54 def reset(self):
55 return self.replace(
56 best_metric=float('inf'), patience_count=0, should_stop=False
57 )
58
59 def update(self, metric):
60 """Update the state based on metric.
61
62 Returns:
63 A pair (has_improved, early_stop), where `has_improved` is True when there
64 was an improvement greater than `min_delta` from the previous
65 `best_metric` and `early_stop` is the updated `EarlyStop` object.
66 """
67
68 if (
69 math.isinf(self.best_metric)
70 or self.best_metric - metric > self.min_delta
71 ):
72 return True, self.replace(best_metric=metric, patience_count=0)
73 else:
74 should_stop = self.patience_count >= self.patience or self.should_stop
75 return False, self.replace(
76 patience_count=self.patience_count + 1, should_stop=should_stop
77 )
78
[end of flax/training/early_stopping.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/training/early_stopping.py b/flax/training/early_stopping.py
--- a/flax/training/early_stopping.py
+++ b/flax/training/early_stopping.py
@@ -30,7 +30,7 @@
rng, input_rng = jax.random.split(rng)
optimizer, train_metrics = train_epoch(
optimizer, train_ds, config.batch_size, epoch, input_rng)
- _, early_stop = early_stop.update(train_metrics['loss'])
+ early_stop = early_stop.update(train_metrics['loss'])
if early_stop.should_stop:
print('Met early stopping criteria, breaking...')
break
@@ -43,6 +43,8 @@
patience_count: Number of steps since last improving update.
should_stop: Whether the training loop should stop to avoid
overfitting.
+ has_improved: Whether the metric has improved greater or
+ equal to the min_delta in the last `.update` call.
"""
min_delta: float = 0
@@ -50,28 +52,29 @@
best_metric: float = float('inf')
patience_count: int = 0
should_stop: bool = False
+ has_improved: bool = False
def reset(self):
return self.replace(
- best_metric=float('inf'), patience_count=0, should_stop=False
+ best_metric=float('inf'), patience_count=0, should_stop=False, has_improved=False
)
def update(self, metric):
"""Update the state based on metric.
Returns:
- A pair (has_improved, early_stop), where `has_improved` is True when there
- was an improvement greater than `min_delta` from the previous
- `best_metric` and `early_stop` is the updated `EarlyStop` object.
+ The updated EarlyStopping class. The `.has_improved` attribute is True
+ when there was an improvement greater than `min_delta` from the previous
+ `best_metric`.
"""
if (
math.isinf(self.best_metric)
or self.best_metric - metric > self.min_delta
):
- return True, self.replace(best_metric=metric, patience_count=0)
+ return self.replace(best_metric=metric, patience_count=0, has_improved=True)
else:
should_stop = self.patience_count >= self.patience or self.should_stop
- return False, self.replace(
- patience_count=self.patience_count + 1, should_stop=should_stop
+ return self.replace(
+ patience_count=self.patience_count + 1, should_stop=should_stop, has_improved=False
)
| {"golden_diff": "diff --git a/flax/training/early_stopping.py b/flax/training/early_stopping.py\n--- a/flax/training/early_stopping.py\n+++ b/flax/training/early_stopping.py\n@@ -30,7 +30,7 @@\n rng, input_rng = jax.random.split(rng)\n optimizer, train_metrics = train_epoch(\n optimizer, train_ds, config.batch_size, epoch, input_rng)\n- _, early_stop = early_stop.update(train_metrics['loss'])\n+ early_stop = early_stop.update(train_metrics['loss'])\n if early_stop.should_stop:\n print('Met early stopping criteria, breaking...')\n break\n@@ -43,6 +43,8 @@\n patience_count: Number of steps since last improving update.\n should_stop: Whether the training loop should stop to avoid\n overfitting.\n+ has_improved: Whether the metric has improved greater or\n+ equal to the min_delta in the last `.update` call.\n \"\"\"\n \n min_delta: float = 0\n@@ -50,28 +52,29 @@\n best_metric: float = float('inf')\n patience_count: int = 0\n should_stop: bool = False\n+ has_improved: bool = False\n \n def reset(self):\n return self.replace(\n- best_metric=float('inf'), patience_count=0, should_stop=False\n+ best_metric=float('inf'), patience_count=0, should_stop=False, has_improved=False\n )\n \n def update(self, metric):\n \"\"\"Update the state based on metric.\n \n Returns:\n- A pair (has_improved, early_stop), where `has_improved` is True when there\n- was an improvement greater than `min_delta` from the previous\n- `best_metric` and `early_stop` is the updated `EarlyStop` object.\n+ The updated EarlyStopping class. The `.has_improved` attribute is True\n+ when there was an improvement greater than `min_delta` from the previous\n+ `best_metric`.\n \"\"\"\n \n if (\n math.isinf(self.best_metric)\n or self.best_metric - metric > self.min_delta\n ):\n- return True, self.replace(best_metric=metric, patience_count=0)\n+ return self.replace(best_metric=metric, patience_count=0, has_improved=True)\n else:\n should_stop = self.patience_count >= self.patience or self.should_stop\n- return False, self.replace(\n- patience_count=self.patience_count + 1, should_stop=should_stop\n+ return self.replace(\n+ patience_count=self.patience_count + 1, should_stop=should_stop, has_improved=False\n )\n", "issue": "Improve Early Stopping API\nSee discussion in #2090\n", "before_files": [{"content": "# Copyright 2023 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Early stopping.\"\"\"\n\nimport math\nfrom flax import struct\n\n\nclass EarlyStopping(struct.PyTreeNode):\n \"\"\"Early stopping to avoid overfitting during training.\n\n The following example stops training early if the difference between losses\n recorded in the current epoch and previous epoch is less than 1e-3\n consecutively for 2 times::\n\n early_stop = EarlyStopping(min_delta=1e-3, patience=2)\n for epoch in range(1, num_epochs+1):\n rng, input_rng = jax.random.split(rng)\n optimizer, train_metrics = train_epoch(\n optimizer, train_ds, config.batch_size, epoch, input_rng)\n _, early_stop = early_stop.update(train_metrics['loss'])\n 
if early_stop.should_stop:\n print('Met early stopping criteria, breaking...')\n break\n\n Attributes:\n min_delta: Minimum delta between updates to be considered an\n improvement.\n patience: Number of steps of no improvement before stopping.\n best_metric: Current best metric value.\n patience_count: Number of steps since last improving update.\n should_stop: Whether the training loop should stop to avoid\n overfitting.\n \"\"\"\n\n min_delta: float = 0\n patience: int = 0\n best_metric: float = float('inf')\n patience_count: int = 0\n should_stop: bool = False\n\n def reset(self):\n return self.replace(\n best_metric=float('inf'), patience_count=0, should_stop=False\n )\n\n def update(self, metric):\n \"\"\"Update the state based on metric.\n\n Returns:\n A pair (has_improved, early_stop), where `has_improved` is True when there\n was an improvement greater than `min_delta` from the previous\n `best_metric` and `early_stop` is the updated `EarlyStop` object.\n \"\"\"\n\n if (\n math.isinf(self.best_metric)\n or self.best_metric - metric > self.min_delta\n ):\n return True, self.replace(best_metric=metric, patience_count=0)\n else:\n should_stop = self.patience_count >= self.patience or self.should_stop\n return False, self.replace(\n patience_count=self.patience_count + 1, should_stop=should_stop\n )\n", "path": "flax/training/early_stopping.py"}]} | 1,330 | 590 |
gh_patches_debug_26326 | rasdani/github-patches | git_diff | PrefectHQ__prefect-9267 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Temporary network connectivity problems cause Worker to stop working
### First check
- [X] I added a descriptive title to this issue.
- [X] I used the GitHub search to find a similar issue and didn't find it.
- [X] I searched the Prefect documentation for this issue.
- [X] I checked that this issue is related to Prefect and not one of its dependencies.
### Bug summary
Network issues seem to get the Worker into a state where it continues to send heartbeats, but stops running flows, resulting in flows getting stuck in "pending" state.
### Reproduction
```python3
Run a worker and wait until it gets "stuck"
```
### Error
```python3
$ k logs prefect-worker-stable-78d664b779-snmln
/usr/local/lib/python3.11/site-packages/prefect/experimental/cli/worker.py:81: ExperimentalFeature: The workers feature is experimental. The interface or behavior may change without warning, we recommend pinning versions to prevent unexpected changes. To disable warnings for this group of experiments, disable PREFECT_EXPERIMENTAL_WARN_WORKERS.
async with worker_cls(
Worker 'KubernetesWorker 3ee16ee0-5575-4d11-975b-a644c66b2362' started!
Failed the last 3 attempts. Please check your environment and configuration.
Examples of recent errors:
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/h2/connection.py", line 224, in
process_input
func, target_state = self._transitions[(self.state, input_)]
~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^
KeyError: (<ConnectionState.CLOSED: 3>, <ConnectionInputs.SEND_HEADERS: 0>)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/httpcore/_async/http2.py", line
106, in handle_async_request
await self._send_request_headers(request=request, stream_id=stream_id)
File "/usr/local/lib/python3.11/site-packages/httpcore/_async/http2.py", line
203, in _send_request_headers
self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)
File "/usr/local/lib/python3.11/site-packages/h2/connection.py", line 766, in
send_headers
self.state_machine.process_input(ConnectionInputs.SEND_HEADERS)
File "/usr/local/lib/python3.11/site-packages/h2/connection.py", line 228, in
process_input
raise ProtocolError(
h2.exceptions.ProtocolError: Invalid input ConnectionInputs.SEND_HEADERS in
state ConnectionState.CLOSED
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/httpx/_transports/default.py",
line 60, in map_httpcore_exceptions
yield
File "/usr/local/lib/python3.11/site-packages/httpx/_transports/default.py",
line 353, in handle_async_request
resp = await self._pool.handle_async_request(req)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File
"/usr/local/lib/python3.11/site-packages/httpcore/_async/connection_pool.py",
line 253, in handle_async_request
raise exc
File
"/usr/local/lib/python3.11/site-packages/httpcore/_async/connection_pool.py",
line 237, in handle_async_request
response = await connection.handle_async_request(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/httpcore/_async/connection.py",
line 90, in handle_async_request
return await self._connection.handle_async_request(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/httpcore/_async/http2.py", line
142, in handle_async_request
raise LocalProtocolError(exc) # pragma: nocover
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
httpcore.LocalProtocolError: Invalid input ConnectionInputs.SEND_HEADERS in
state ConnectionState.CLOSED
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/prefect/utilities/services.py",
line 46, in critical_service_loop
await workload()
File
"/usr/local/lib/python3.11/site-packages/prefect/experimental/workers/base.py",
line 372, in get_and_submit_flow_runs
runs_response = await self._get_scheduled_flow_runs()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File
"/usr/local/lib/python3.11/site-packages/prefect/experimental/workers/base.py",
line 440, in _get_scheduled_flow_runs
await self._client.get_scheduled_flow_runs_for_work_pool(
File
"/usr/local/lib/python3.11/site-packages/prefect/client/orchestration.py", line
2269, in get_scheduled_flow_runs_for_work_pool
response = await self._client.post(
^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/httpx/_client.py", line 1848, in
post
return await self.request(
^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/httpx/_client.py", line 1533, in
request
return await self.send(request, auth=auth,
follow_redirects=follow_redirects)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^
File "/usr/local/lib/python3.11/site-packages/prefect/client/base.py", line
243, in send
response = await self._send_with_retry(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/prefect/client/base.py", line
189, in _send_with_retry
response = await request()
^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/httpx/_client.py", line 1620, in
send
response = await self._send_handling_auth(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/httpx/_client.py", line 1648, in
_send_handling_auth
response = await self._send_handling_redirects(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/httpx/_client.py", line 1685, in
_send_handling_redirects
response = await self._send_single_request(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/httpx/_client.py", line 1722, in
_send_single_request
response = await transport.handle_async_request(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/httpx/_transports/default.py",
line 352, in handle_async_request
with map_httpcore_exceptions():
File "/usr/local/lib/python3.11/contextlib.py", line 155, in __exit__
self.gen.throw(typ, value, traceback)
File "/usr/local/lib/python3.11/site-packages/httpx/_transports/default.py",
line 77, in map_httpcore_exceptions
raise mapped_exc(message) from exc
httpx.LocalProtocolError: Invalid input ConnectionInputs.SEND_HEADERS in state
ConnectionState.CLOSED
```
### Versions
```Text
Image: prefecthq/prefect-dev:main-python3.11-kubernetes
Image ID: docker.io/prefecthq/prefect-dev@sha256:0b2bd51e56b1a61b8570f5973192270a7afbf49bf808c9e125168f048c25cdb0
```
### Additional context
_No response_
</issue>
<code>
[start of src/prefect/utilities/services.py]
1 import sys
2 from collections import deque
3 from traceback import format_exception
4 from types import TracebackType
5 from typing import Callable, Coroutine, Deque, Tuple
6
7 import anyio
8 import httpx
9
10 from prefect.utilities.collections import distinct
11 from prefect.utilities.math import clamped_poisson_interval
12
13
14 async def critical_service_loop(
15 workload: Callable[..., Coroutine],
16 interval: float,
17 memory: int = 10,
18 consecutive: int = 3,
19 printer: Callable[..., None] = print,
20 run_once: bool = False,
21 jitter_range: float = None,
22 ):
23 """
24 Runs the given `workload` function on the specified `interval`, while being
25 forgiving of intermittent issues like temporary HTTP errors. If more than a certain
26 number of `consecutive` errors occur, print a summary of up to `memory` recent
27 exceptions to `printer`, then exit.
28
29 Args:
30 workload: the function to call
31 interval: how frequently to call it
32 memory: how many recent errors to remember
33 consecutive: how many consecutive errors must we see before we exit
34 printer: a `print`-like function where errors will be reported
35 run_once: if set, the loop will only run once then return
36 jitter_range: if set, the interval will be a random variable (rv) drawn from
37 a clamped Poisson distribution where lambda = interval and the rv is bound
38 between `interval * (1 - range) < rv < interval * (1 + range)`
39 """
40
41 track_record: Deque[bool] = deque([True] * consecutive, maxlen=consecutive)
42 failures: Deque[Tuple[Exception, TracebackType]] = deque(maxlen=memory)
43
44 while True:
45 try:
46 await workload()
47
48 track_record.append(True)
49 except httpx.TransportError as exc:
50 # httpx.TransportError is the base class for any kind of communications
51 # error, like timeouts, connection failures, etc. This does _not_ cover
52 # routine HTTP error codes (even 5xx errors like 502/503) so this
53 # handler should not be attempting to cover cases where the Prefect server
54 # or Prefect Cloud is having an outage (which will be covered by the
55 # exception clause below)
56 track_record.append(False)
57 failures.append((exc, sys.exc_info()[-1]))
58 except httpx.HTTPStatusError as exc:
59 if exc.response.status_code in (502, 503):
60 # 502/503 indicate a potential outage of the Prefect server or Prefect
61 # Cloud, which is likely to be temporary and transient. Don't quit
62 # over these unless it is prolonged.
63 track_record.append(False)
64 failures.append((exc, sys.exc_info()[-1]))
65 else:
66 raise
67 except KeyboardInterrupt:
68 return
69
70 # Decide whether to exit now based on recent history.
71 #
72 # Given some typical background error rate of, say, 1%, we may still observe
73 # quite a few errors in our recent samples, but this is not necessarily a cause
74 # for concern.
75 #
76 # Imagine two distributions that could reflect our situation at any time: the
77 # everything-is-fine distribution of errors, and the everything-is-on-fire
78 # distribution of errors. We are trying to determine which of those two worlds
79 # we are currently experiencing. We compare the likelihood that we'd draw N
80 # consecutive errors from each. In the everything-is-fine distribution, that
81 # would be a very low-probability occurrance, but in the everything-is-on-fire
82 # distribution, that is a high-probability occurrance.
83 #
84 # Remarkably, we only need to look back for a small number of consecutive
85 # errors to have reasonable confidence that this is indeed an anomaly.
86 # @anticorrelator and @chrisguidry estimated that we should only need to look
87 # back for 3 consectutive errors.
88 if not any(track_record):
89 # We've failed enough times to be sure something is wrong, the writing is
90 # on the wall. Let's explain what we've seen and exit.
91 printer(
92 f"\nFailed the last {consecutive} attempts. "
93 "Please check your environment and configuration."
94 )
95
96 printer("Examples of recent errors:\n")
97
98 failures_by_type = distinct(
99 reversed(failures),
100 key=lambda pair: type(pair[0]), # Group by the type of exception
101 )
102 for exception, traceback in failures_by_type:
103 printer("".join(format_exception(None, exception, traceback)))
104 printer()
105 return
106
107 if run_once:
108 return
109
110 if jitter_range is not None:
111 sleep = clamped_poisson_interval(interval, clamping_factor=jitter_range)
112 else:
113 sleep = interval
114
115 await anyio.sleep(sleep)
116
[end of src/prefect/utilities/services.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/prefect/utilities/services.py b/src/prefect/utilities/services.py
--- a/src/prefect/utilities/services.py
+++ b/src/prefect/utilities/services.py
@@ -64,8 +64,6 @@
failures.append((exc, sys.exc_info()[-1]))
else:
raise
- except KeyboardInterrupt:
- return
# Decide whether to exit now based on recent history.
#
@@ -89,7 +87,7 @@
# We've failed enough times to be sure something is wrong, the writing is
# on the wall. Let's explain what we've seen and exit.
printer(
- f"\nFailed the last {consecutive} attempts. "
+ f"\nFailed the last {consecutive} attempts. "
"Please check your environment and configuration."
)
@@ -102,7 +100,8 @@
for exception, traceback in failures_by_type:
printer("".join(format_exception(None, exception, traceback)))
printer()
- return
+
+ raise RuntimeError("Service exceeded error threshold.")
if run_once:
return
| {"golden_diff": "diff --git a/src/prefect/utilities/services.py b/src/prefect/utilities/services.py\n--- a/src/prefect/utilities/services.py\n+++ b/src/prefect/utilities/services.py\n@@ -64,8 +64,6 @@\n failures.append((exc, sys.exc_info()[-1]))\n else:\n raise\n- except KeyboardInterrupt:\n- return\n \n # Decide whether to exit now based on recent history.\n #\n@@ -89,7 +87,7 @@\n # We've failed enough times to be sure something is wrong, the writing is\n # on the wall. Let's explain what we've seen and exit.\n printer(\n- f\"\\nFailed the last {consecutive} attempts. \"\n+ f\"\\nFailed the last {consecutive} attempts. \"\n \"Please check your environment and configuration.\"\n )\n \n@@ -102,7 +100,8 @@\n for exception, traceback in failures_by_type:\n printer(\"\".join(format_exception(None, exception, traceback)))\n printer()\n- return\n+\n+ raise RuntimeError(\"Service exceeded error threshold.\")\n \n if run_once:\n return\n", "issue": "Temporary network connectivity problems cause Worker to stop working\n### First check\n\n- [X] I added a descriptive title to this issue.\n- [X] I used the GitHub search to find a similar issue and didn't find it.\n- [X] I searched the Prefect documentation for this issue.\n- [X] I checked that this issue is related to Prefect and not one of its dependencies.\n\n### Bug summary\n\nNetwork issues seem to get the Worker into a state where it continues to send heartbeats, but stops running flows, resulting in flows getting stuck in \"pending\" state.\n\n### Reproduction\n\n```python3\nRun a worker and wait until it gets \"stuck\"\n```\n\n\n### Error\n\n```python3\n$ k logs prefect-worker-stable-78d664b779-snmln\r\n/usr/local/lib/python3.11/site-packages/prefect/experimental/cli/worker.py:81: ExperimentalFeature: The workers feature is experimental. The interface or behavior may change without warning, we recommend pinning versions to prevent unexpected changes. To disable warnings for this group of experiments, disable PREFECT_EXPERIMENTAL_WARN_WORKERS.\r\n async with worker_cls(\r\nWorker 'KubernetesWorker 3ee16ee0-5575-4d11-975b-a644c66b2362' started!\r\n\r\nFailed the last 3 attempts. 
Please check your environment and configuration.\r\nExamples of recent errors:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.11/site-packages/h2/connection.py\", line 224, in \r\nprocess_input\r\n func, target_state = self._transitions[(self.state, input_)]\r\n ~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^\r\nKeyError: (<ConnectionState.CLOSED: 3>, <ConnectionInputs.SEND_HEADERS: 0>)\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.11/site-packages/httpcore/_async/http2.py\", line \r\n106, in handle_async_request\r\n await self._send_request_headers(request=request, stream_id=stream_id)\r\n File \"/usr/local/lib/python3.11/site-packages/httpcore/_async/http2.py\", line \r\n203, in _send_request_headers\r\n self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)\r\n File \"/usr/local/lib/python3.11/site-packages/h2/connection.py\", line 766, in \r\nsend_headers\r\n self.state_machine.process_input(ConnectionInputs.SEND_HEADERS)\r\n File \"/usr/local/lib/python3.11/site-packages/h2/connection.py\", line 228, in \r\nprocess_input\r\n raise ProtocolError(\r\nh2.exceptions.ProtocolError: Invalid input ConnectionInputs.SEND_HEADERS in \r\nstate ConnectionState.CLOSED\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.11/site-packages/httpx/_transports/default.py\", \r\nline 60, in map_httpcore_exceptions\r\n yield\r\n File \"/usr/local/lib/python3.11/site-packages/httpx/_transports/default.py\", \r\nline 353, in handle_async_request\r\n resp = await self._pool.handle_async_request(req)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \r\n\"/usr/local/lib/python3.11/site-packages/httpcore/_async/connection_pool.py\", \r\nline 253, in handle_async_request\r\n raise exc\r\n File \r\n\"/usr/local/lib/python3.11/site-packages/httpcore/_async/connection_pool.py\", \r\nline 237, in handle_async_request\r\n response = await connection.handle_async_request(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/httpcore/_async/connection.py\", \r\nline 90, in handle_async_request\r\n return await self._connection.handle_async_request(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/httpcore/_async/http2.py\", line \r\n142, in handle_async_request\r\n raise LocalProtocolError(exc) # pragma: nocover\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nhttpcore.LocalProtocolError: Invalid input ConnectionInputs.SEND_HEADERS in \r\nstate ConnectionState.CLOSED\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.11/site-packages/prefect/utilities/services.py\", \r\nline 46, in critical_service_loop\r\n await workload()\r\n File \r\n\"/usr/local/lib/python3.11/site-packages/prefect/experimental/workers/base.py\", \r\nline 372, in get_and_submit_flow_runs\r\n runs_response = await self._get_scheduled_flow_runs()\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \r\n\"/usr/local/lib/python3.11/site-packages/prefect/experimental/workers/base.py\", \r\nline 440, in _get_scheduled_flow_runs\r\n await self._client.get_scheduled_flow_runs_for_work_pool(\r\n File \r\n\"/usr/local/lib/python3.11/site-packages/prefect/client/orchestration.py\", line \r\n2269, in 
get_scheduled_flow_runs_for_work_pool\r\n response = await self._client.post(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/httpx/_client.py\", line 1848, in\r\npost\r\n return await self.request(\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/httpx/_client.py\", line 1533, in\r\nrequest\r\n return await self.send(request, auth=auth, \r\nfollow_redirects=follow_redirects)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n^\r\n File \"/usr/local/lib/python3.11/site-packages/prefect/client/base.py\", line \r\n243, in send\r\n response = await self._send_with_retry(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/prefect/client/base.py\", line \r\n189, in _send_with_retry\r\n response = await request()\r\n ^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/httpx/_client.py\", line 1620, in\r\nsend\r\n response = await self._send_handling_auth(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/httpx/_client.py\", line 1648, in\r\n_send_handling_auth\r\n response = await self._send_handling_redirects(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/httpx/_client.py\", line 1685, in\r\n_send_handling_redirects\r\n response = await self._send_single_request(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/httpx/_client.py\", line 1722, in\r\n_send_single_request\r\n response = await transport.handle_async_request(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/httpx/_transports/default.py\", \r\nline 352, in handle_async_request\r\n with map_httpcore_exceptions():\r\n File \"/usr/local/lib/python3.11/contextlib.py\", line 155, in __exit__\r\n self.gen.throw(typ, value, traceback)\r\n File \"/usr/local/lib/python3.11/site-packages/httpx/_transports/default.py\", \r\nline 77, in map_httpcore_exceptions\r\n raise mapped_exc(message) from exc\r\nhttpx.LocalProtocolError: Invalid input ConnectionInputs.SEND_HEADERS in state \r\nConnectionState.CLOSED\n```\n\n\n### Versions\n\n```Text\nImage: prefecthq/prefect-dev:main-python3.11-kubernetes\r\n Image ID: docker.io/prefecthq/prefect-dev@sha256:0b2bd51e56b1a61b8570f5973192270a7afbf49bf808c9e125168f048c25cdb0\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import sys\nfrom collections import deque\nfrom traceback import format_exception\nfrom types import TracebackType\nfrom typing import Callable, Coroutine, Deque, Tuple\n\nimport anyio\nimport httpx\n\nfrom prefect.utilities.collections import distinct\nfrom prefect.utilities.math import clamped_poisson_interval\n\n\nasync def critical_service_loop(\n workload: Callable[..., Coroutine],\n interval: float,\n memory: int = 10,\n consecutive: int = 3,\n printer: Callable[..., None] = print,\n run_once: bool = False,\n jitter_range: float = None,\n):\n \"\"\"\n Runs the given `workload` function on the specified `interval`, while being\n forgiving of intermittent issues like temporary HTTP errors. 
If more than a certain\n number of `consecutive` errors occur, print a summary of up to `memory` recent\n exceptions to `printer`, then exit.\n\n Args:\n workload: the function to call\n interval: how frequently to call it\n memory: how many recent errors to remember\n consecutive: how many consecutive errors must we see before we exit\n printer: a `print`-like function where errors will be reported\n run_once: if set, the loop will only run once then return\n jitter_range: if set, the interval will be a random variable (rv) drawn from\n a clamped Poisson distribution where lambda = interval and the rv is bound\n between `interval * (1 - range) < rv < interval * (1 + range)`\n \"\"\"\n\n track_record: Deque[bool] = deque([True] * consecutive, maxlen=consecutive)\n failures: Deque[Tuple[Exception, TracebackType]] = deque(maxlen=memory)\n\n while True:\n try:\n await workload()\n\n track_record.append(True)\n except httpx.TransportError as exc:\n # httpx.TransportError is the base class for any kind of communications\n # error, like timeouts, connection failures, etc. This does _not_ cover\n # routine HTTP error codes (even 5xx errors like 502/503) so this\n # handler should not be attempting to cover cases where the Prefect server\n # or Prefect Cloud is having an outage (which will be covered by the\n # exception clause below)\n track_record.append(False)\n failures.append((exc, sys.exc_info()[-1]))\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code in (502, 503):\n # 502/503 indicate a potential outage of the Prefect server or Prefect\n # Cloud, which is likely to be temporary and transient. Don't quit\n # over these unless it is prolonged.\n track_record.append(False)\n failures.append((exc, sys.exc_info()[-1]))\n else:\n raise\n except KeyboardInterrupt:\n return\n\n # Decide whether to exit now based on recent history.\n #\n # Given some typical background error rate of, say, 1%, we may still observe\n # quite a few errors in our recent samples, but this is not necessarily a cause\n # for concern.\n #\n # Imagine two distributions that could reflect our situation at any time: the\n # everything-is-fine distribution of errors, and the everything-is-on-fire\n # distribution of errors. We are trying to determine which of those two worlds\n # we are currently experiencing. We compare the likelihood that we'd draw N\n # consecutive errors from each. In the everything-is-fine distribution, that\n # would be a very low-probability occurrance, but in the everything-is-on-fire\n # distribution, that is a high-probability occurrance.\n #\n # Remarkably, we only need to look back for a small number of consecutive\n # errors to have reasonable confidence that this is indeed an anomaly.\n # @anticorrelator and @chrisguidry estimated that we should only need to look\n # back for 3 consectutive errors.\n if not any(track_record):\n # We've failed enough times to be sure something is wrong, the writing is\n # on the wall. Let's explain what we've seen and exit.\n printer(\n f\"\\nFailed the last {consecutive} attempts. 
\"\n \"Please check your environment and configuration.\"\n )\n\n printer(\"Examples of recent errors:\\n\")\n\n failures_by_type = distinct(\n reversed(failures),\n key=lambda pair: type(pair[0]), # Group by the type of exception\n )\n for exception, traceback in failures_by_type:\n printer(\"\".join(format_exception(None, exception, traceback)))\n printer()\n return\n\n if run_once:\n return\n\n if jitter_range is not None:\n sleep = clamped_poisson_interval(interval, clamping_factor=jitter_range)\n else:\n sleep = interval\n\n await anyio.sleep(sleep)\n", "path": "src/prefect/utilities/services.py"}]} | 3,739 | 255 |
gh_patches_debug_30 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2249 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E3002 Invalid Property Lambda/Properties/EphemeralStorage
*cfn-lint version: (`cfn-lint --version`)*
`cfn-lint 0.58.4`
*Description of issue.*
Looks like it does not yet recognize `EphemeralStorage` as a valid property for lambdas
```yml
Lambda:
Type: AWS::Lambda::Function
Properties:
Role: !GetAtt Role.Arn
Timeout: 600
MemorySize: 2048
EphemeralStorage:
Size: 1024
```
Link to the [**docs**](https://aws.amazon.com/blogs/compute/using-larger-ephemeral-storage-for-aws-lambda/) where it shows the new feature
Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)
</issue>
<code>
[start of src/cfnlint/version.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5
6 __version__ = '0.58.4'
7
[end of src/cfnlint/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/version.py b/src/cfnlint/version.py
--- a/src/cfnlint/version.py
+++ b/src/cfnlint/version.py
@@ -3,4 +3,4 @@
SPDX-License-Identifier: MIT-0
"""
-__version__ = '0.58.4'
+__version__ = '0.59.0'
| {"golden_diff": "diff --git a/src/cfnlint/version.py b/src/cfnlint/version.py\n--- a/src/cfnlint/version.py\n+++ b/src/cfnlint/version.py\n@@ -3,4 +3,4 @@\n SPDX-License-Identifier: MIT-0\n \"\"\"\n \n-__version__ = '0.58.4'\n+__version__ = '0.59.0'\n", "issue": "E3002 Invalid Property Lambda/Properties/EphemeralStorage\n*cfn-lint version: (`cfn-lint --version`)*\r\n\r\n`cfn-lint 0.58.4`\r\n\r\n*Description of issue.*\r\nLooks like it does not yet recognize `EphemeralStorage` as a valid property for lambdas\r\n```yml\r\nLambda:\r\n Type: AWS::Lambda::Function\r\n Properties:\r\n Role: !GetAtt Role.Arn\r\n Timeout: 600\r\n MemorySize: 2048\r\n EphemeralStorage:\r\n Size: 1024\r\n```\r\n\r\nLink to the [**docs**](https://aws.amazon.com/blogs/compute/using-larger-ephemeral-storage-for-aws-lambda/) where it shows the new feature\r\n\r\nCfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\n__version__ = '0.58.4'\n", "path": "src/cfnlint/version.py"}]} | 834 | 82 |
gh_patches_debug_19074 | rasdani/github-patches | git_diff | getpelican__pelican-280 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feeds generate malformed urls (with double slash)
When setting a site URL that ends with a slash (like http://feldboris.alwaysdata.net/blog/), pelican will generate URLs in feeds with a double slash (like http://feldboris.alwaysdata.net/blog//feeds/all.atom.xml) just after the site URL. The W3C feeds validator shows a warning for these double slashes.
This may also be the cause of an RSS problem (my RSS client shows old feed entries as unread), as post fields now contain double slashes as well.
</issue>
<code>
[start of pelican/settings.py]
1 # -*- coding: utf-8 -*-
2 import os
3 import locale
4 import logging
5
6 from os.path import isabs
7
8
9 logger = logging.getLogger(__name__)
10
11
12 DEFAULT_THEME = os.sep.join([os.path.dirname(os.path.abspath(__file__)),
13 "themes/notmyidea"])
14 _DEFAULT_CONFIG = {'PATH': '.',
15 'ARTICLE_DIR': '',
16 'ARTICLE_EXCLUDES': ('pages',),
17 'PAGE_DIR': 'pages',
18 'PAGE_EXCLUDES': (),
19 'THEME': DEFAULT_THEME,
20 'OUTPUT_PATH': 'output/',
21 'MARKUP': ('rst', 'md'),
22 'STATIC_PATHS': ['images', ],
23 'THEME_STATIC_PATHS': ['static', ],
24 'FEED': 'feeds/all.atom.xml',
25 'FEED_MAIN_URL': 'feeds/all.atom.xml',
26 'CATEGORY_FEED': 'feeds/%s.atom.xml',
27 'TRANSLATION_FEED': 'feeds/all-%s.atom.xml',
28 'FEED_MAX_ITEMS': '',
29 'SITENAME': 'A Pelican Blog',
30 'DISPLAY_PAGES_ON_MENU': True,
31 'PDF_GENERATOR': False,
32 'DEFAULT_CATEGORY': 'misc',
33 'FALLBACK_ON_FS_DATE': True,
34 'WITH_FUTURE_DATES': True,
35 'CSS_FILE': 'main.css',
36 'REVERSE_ARCHIVE_ORDER': False,
37 'REVERSE_CATEGORY_ORDER': False,
38 'DELETE_OUTPUT_DIRECTORY': False,
39 'ARTICLE_URL': '{slug}.html',
40 'ARTICLE_SAVE_AS': '{slug}.html',
41 'ARTICLE_LANG_URL': '{slug}-{lang}.html',
42 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',
43 'PAGE_URL': 'pages/{slug}.html',
44 'PAGE_SAVE_AS': 'pages/{slug}.html',
45 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',
46 'PAGE_LANG_SAVE_AS': 'pages/{slug}-{lang}.html',
47 'CATEGORY_URL': 'category/{name}.html',
48 'CATEGORY_SAVE_AS': 'category/{name}.html',
49 'TAG_URL': 'tag/{slug}.html',
50 'TAG_SAVE_AS': 'tag/{slug}.html',
51 'AUTHOR_URL': u'author/{slug}.html',
52 'AUTHOR_SAVE_AS': u'author/{slug}.html',
53 'RELATIVE_URLS': True,
54 'DEFAULT_LANG': 'en',
55 'TAG_CLOUD_STEPS': 4,
56 'TAG_CLOUD_MAX_ITEMS': 100,
57 'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'archives'),
58 'PAGINATED_DIRECT_TEMPLATES': ('index', ),
59 'PELICAN_CLASS': 'pelican.Pelican',
60 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',
61 'DATE_FORMATS': {},
62 'JINJA_EXTENSIONS': [],
63 'LOCALE': '', # default to user locale
64 'DEFAULT_PAGINATION': False,
65 'DEFAULT_ORPHANS': 0,
66 'DEFAULT_METADATA': (),
67 'FILES_TO_COPY': (),
68 'DEFAULT_STATUS': 'published',
69 'ARTICLE_PERMALINK_STRUCTURE': '',
70 'TYPOGRIFY': False,
71 }
72
73
74 def read_settings(filename=None):
75 if filename:
76 local_settings = get_settings_from_file(filename)
77 else:
78 local_settings = _DEFAULT_CONFIG
79 configured_settings = configure_settings(local_settings, None, filename)
80 return configured_settings
81
82
83 def get_settings_from_file(filename, default_settings=None):
84 """Load a Python file into a dictionary.
85 """
86 if default_settings == None:
87 default_settings = _DEFAULT_CONFIG
88 context = default_settings.copy()
89 if filename:
90 tempdict = {}
91 execfile(filename, tempdict)
92 for key in tempdict:
93 if key.isupper():
94 context[key] = tempdict[key]
95 return context
96
97
98 def configure_settings(settings, default_settings=None, filename=None):
99 """Provide optimizations, error checking, and warnings for loaded settings"""
100 if default_settings is None:
101 default_settings = _DEFAULT_CONFIG
102
103 # Make the paths relative to the settings file
104 if filename:
105 for path in ['PATH', 'OUTPUT_PATH']:
106 if path in settings:
107 if settings[path] is not None and not isabs(settings[path]):
108 settings[path] = os.path.abspath(os.path.normpath(
109 os.path.join(os.path.dirname(filename), settings[path]))
110 )
111
112 # if locales is not a list, make it one
113 locales = settings['LOCALE']
114
115 if isinstance(locales, basestring):
116 locales = [locales]
117
118 # try to set the different locales, fallback on the default.
119 if not locales:
120 locales = _DEFAULT_CONFIG['LOCALE']
121
122 for locale_ in locales:
123 try:
124 locale.setlocale(locale.LC_ALL, locale_)
125 break # break if it is successfull
126 except locale.Error:
127 pass
128 else:
129 logger.warn("LOCALE option doesn't contain a correct value")
130
131 # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL
132 if ('SITEURL' in settings) and (not 'FEED_DOMAIN' in settings):
133 settings['FEED_DOMAIN'] = settings['SITEURL']
134
135 # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined
136 if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings):
137 logger.warn("Since feed URLs should always be absolute, you should specify "
138 "FEED_DOMAIN in your settings. (e.g., 'FEED_DOMAIN = "
139 "http://www.example.com')")
140
141 if not 'TIMEZONE' in settings:
142 logger.warn("No timezone information specified in the settings. Assuming"
143 " your timezone is UTC for feed generation. Check "
144 "http://docs.notmyidea.org/alexis/pelican/settings.html#timezone "
145 "for more information")
146
147 return settings
148
[end of pelican/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pelican/settings.py b/pelican/settings.py
--- a/pelican/settings.py
+++ b/pelican/settings.py
@@ -128,9 +128,15 @@
else:
logger.warn("LOCALE option doesn't contain a correct value")
- # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL
- if ('SITEURL' in settings) and (not 'FEED_DOMAIN' in settings):
- settings['FEED_DOMAIN'] = settings['SITEURL']
+ if ('SITEURL' in settings):
+ # If SITEURL has a trailing slash, remove it and provide a warning
+ siteurl = settings['SITEURL']
+ if (siteurl[len(siteurl) - 1:] == '/'):
+ settings['SITEURL'] = siteurl[:-1]
+ logger.warn("Removed extraneous trailing slash from SITEURL.")
+ # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL
+ if not 'FEED_DOMAIN' in settings:
+ settings['FEED_DOMAIN'] = settings['SITEURL']
# Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined
if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings):
| {"golden_diff": "diff --git a/pelican/settings.py b/pelican/settings.py\n--- a/pelican/settings.py\n+++ b/pelican/settings.py\n@@ -128,9 +128,15 @@\n else:\n logger.warn(\"LOCALE option doesn't contain a correct value\")\n \n- # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL\n- if ('SITEURL' in settings) and (not 'FEED_DOMAIN' in settings):\n- settings['FEED_DOMAIN'] = settings['SITEURL']\n+ if ('SITEURL' in settings):\n+ # If SITEURL has a trailing slash, remove it and provide a warning\n+ siteurl = settings['SITEURL']\n+ if (siteurl[len(siteurl) - 1:] == '/'):\n+ settings['SITEURL'] = siteurl[:-1]\n+ logger.warn(\"Removed extraneous trailing slash from SITEURL.\")\n+ # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL\n+ if not 'FEED_DOMAIN' in settings:\n+ settings['FEED_DOMAIN'] = settings['SITEURL']\n \n # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined\n if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings):\n", "issue": "Feeds generate malformed urls (with double slash)\nWhen setting a site url that ends with a slash (like http://feldboris.alwaysdata.net/blog/), pelican will generate url in feeds with double slash (like http://feldboris.alwaysdata.net/blog//feeds/all.atom.xml) just after site url. W3C feeds validator show a warning with these double slashs.\n\nIt's maybe the cause of a rss problem (my rss client show old feeds entries as unread) as post fields also contains double slash now.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport locale\nimport logging\n\nfrom os.path import isabs\n\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_THEME = os.sep.join([os.path.dirname(os.path.abspath(__file__)),\n \"themes/notmyidea\"])\n_DEFAULT_CONFIG = {'PATH': '.',\n 'ARTICLE_DIR': '',\n 'ARTICLE_EXCLUDES': ('pages',),\n 'PAGE_DIR': 'pages',\n 'PAGE_EXCLUDES': (),\n 'THEME': DEFAULT_THEME,\n 'OUTPUT_PATH': 'output/',\n 'MARKUP': ('rst', 'md'),\n 'STATIC_PATHS': ['images', ],\n 'THEME_STATIC_PATHS': ['static', ],\n 'FEED': 'feeds/all.atom.xml',\n 'FEED_MAIN_URL': 'feeds/all.atom.xml',\n 'CATEGORY_FEED': 'feeds/%s.atom.xml',\n 'TRANSLATION_FEED': 'feeds/all-%s.atom.xml',\n 'FEED_MAX_ITEMS': '',\n 'SITENAME': 'A Pelican Blog',\n 'DISPLAY_PAGES_ON_MENU': True,\n 'PDF_GENERATOR': False,\n 'DEFAULT_CATEGORY': 'misc',\n 'FALLBACK_ON_FS_DATE': True,\n 'WITH_FUTURE_DATES': True,\n 'CSS_FILE': 'main.css',\n 'REVERSE_ARCHIVE_ORDER': False,\n 'REVERSE_CATEGORY_ORDER': False,\n 'DELETE_OUTPUT_DIRECTORY': False,\n 'ARTICLE_URL': '{slug}.html',\n 'ARTICLE_SAVE_AS': '{slug}.html',\n 'ARTICLE_LANG_URL': '{slug}-{lang}.html',\n 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',\n 'PAGE_URL': 'pages/{slug}.html',\n 'PAGE_SAVE_AS': 'pages/{slug}.html',\n 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',\n 'PAGE_LANG_SAVE_AS': 'pages/{slug}-{lang}.html',\n 'CATEGORY_URL': 'category/{name}.html',\n 'CATEGORY_SAVE_AS': 'category/{name}.html',\n 'TAG_URL': 'tag/{slug}.html',\n 'TAG_SAVE_AS': 'tag/{slug}.html',\n 'AUTHOR_URL': u'author/{slug}.html',\n 'AUTHOR_SAVE_AS': u'author/{slug}.html',\n 'RELATIVE_URLS': True,\n 'DEFAULT_LANG': 'en',\n 'TAG_CLOUD_STEPS': 4,\n 'TAG_CLOUD_MAX_ITEMS': 100,\n 'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'archives'),\n 'PAGINATED_DIRECT_TEMPLATES': ('index', ),\n 'PELICAN_CLASS': 'pelican.Pelican',\n 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',\n 'DATE_FORMATS': {},\n 'JINJA_EXTENSIONS': [],\n 'LOCALE': '', # default to user locale\n 
'DEFAULT_PAGINATION': False,\n 'DEFAULT_ORPHANS': 0,\n 'DEFAULT_METADATA': (),\n 'FILES_TO_COPY': (),\n 'DEFAULT_STATUS': 'published',\n 'ARTICLE_PERMALINK_STRUCTURE': '',\n 'TYPOGRIFY': False,\n }\n\n\ndef read_settings(filename=None):\n if filename:\n local_settings = get_settings_from_file(filename)\n else:\n local_settings = _DEFAULT_CONFIG\n configured_settings = configure_settings(local_settings, None, filename)\n return configured_settings\n\n\ndef get_settings_from_file(filename, default_settings=None):\n \"\"\"Load a Python file into a dictionary.\n \"\"\"\n if default_settings == None:\n default_settings = _DEFAULT_CONFIG\n context = default_settings.copy()\n if filename:\n tempdict = {}\n execfile(filename, tempdict)\n for key in tempdict:\n if key.isupper():\n context[key] = tempdict[key]\n return context\n\n\ndef configure_settings(settings, default_settings=None, filename=None):\n \"\"\"Provide optimizations, error checking, and warnings for loaded settings\"\"\"\n if default_settings is None:\n default_settings = _DEFAULT_CONFIG\n\n # Make the paths relative to the settings file\n if filename:\n for path in ['PATH', 'OUTPUT_PATH']:\n if path in settings:\n if settings[path] is not None and not isabs(settings[path]):\n settings[path] = os.path.abspath(os.path.normpath(\n os.path.join(os.path.dirname(filename), settings[path]))\n )\n\n # if locales is not a list, make it one\n locales = settings['LOCALE']\n\n if isinstance(locales, basestring):\n locales = [locales]\n\n # try to set the different locales, fallback on the default.\n if not locales:\n locales = _DEFAULT_CONFIG['LOCALE']\n\n for locale_ in locales:\n try:\n locale.setlocale(locale.LC_ALL, locale_)\n break # break if it is successfull\n except locale.Error:\n pass\n else:\n logger.warn(\"LOCALE option doesn't contain a correct value\")\n\n # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL\n if ('SITEURL' in settings) and (not 'FEED_DOMAIN' in settings):\n settings['FEED_DOMAIN'] = settings['SITEURL']\n\n # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined\n if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings):\n logger.warn(\"Since feed URLs should always be absolute, you should specify \"\n \"FEED_DOMAIN in your settings. (e.g., 'FEED_DOMAIN = \"\n \"http://www.example.com')\")\n\n if not 'TIMEZONE' in settings:\n logger.warn(\"No timezone information specified in the settings. Assuming\"\n \" your timezone is UTC for feed generation. Check \"\n \"http://docs.notmyidea.org/alexis/pelican/settings.html#timezone \"\n \"for more information\")\n\n return settings\n", "path": "pelican/settings.py"}]} | 2,264 | 302 |
gh_patches_debug_9059 | rasdani/github-patches | git_diff | feast-dev__feast-2606 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spark source complains about a "table or view not found" error.
## Expected Behavior
The Spark offline store should resolve a table referenced with the `db.table` pattern.
## Current Behavior
If the `table` of a SparkSource is set with the pattern "db.table", Feast complains about a "table or view not found" error.
## Steps to reproduce
1. feast init test_repo
2. modify example.py to use SparkSource, and set table of SparkSource with pattern "db.table"
3. configure feature_store.yaml
4. feast apply
### Specifications
- Version: 0.20.1
- Platform: ubuntu 18/04
- Subsystem:
## Possible Solution
</issue>
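
Editor's note: the sketch below is not part of the original issue; it is a minimal illustration of why the error appears. Spark SQL treats a name wrapped in a single pair of backticks as one identifier looked up in the current database, so the database qualifier in "db.table" is lost. Quoting each component separately preserves it. The helper name `quote_table_reference` is hypothetical.

```python
# Hypothetical helper, added for illustration only.
def quote_table_reference(table: str) -> str:
    # "db.table" -> "`db`.`table`"; wrapping the whole string in one pair of
    # backticks ("`db.table`") makes Spark look for a table literally named
    # "db.table" in the current database, which raises
    # "table or view not found".
    return ".".join("`{}`".format(part) for part in table.split("."))


assert quote_table_reference("db.table") == "`db`.`table`"
assert quote_table_reference("table") == "`table`"
```

A fix along these lines only changes how the reference is quoted; it does not change which tables exist.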
<code>
[start of sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py]
1 import logging
2 import traceback
3 import warnings
4 from enum import Enum
5 from typing import Any, Callable, Dict, Iterable, Optional, Tuple
6
7 from pyspark.sql import SparkSession
8
9 from feast.data_source import DataSource
10 from feast.errors import DataSourceNoNameException
11 from feast.infra.offline_stores.offline_utils import get_temp_entity_table_name
12 from feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto
13 from feast.protos.feast.core.SavedDataset_pb2 import (
14 SavedDatasetStorage as SavedDatasetStorageProto,
15 )
16 from feast.repo_config import RepoConfig
17 from feast.saved_dataset import SavedDatasetStorage
18 from feast.type_map import spark_to_feast_value_type
19 from feast.value_type import ValueType
20
21 logger = logging.getLogger(__name__)
22
23
24 class SparkSourceFormat(Enum):
25 csv = "csv"
26 json = "json"
27 parquet = "parquet"
28 delta = "delta"
29 avro = "avro"
30
31
32 class SparkSource(DataSource):
33 def __init__(
34 self,
35 *,
36 name: Optional[str] = None,
37 table: Optional[str] = None,
38 query: Optional[str] = None,
39 path: Optional[str] = None,
40 file_format: Optional[str] = None,
41 event_timestamp_column: Optional[str] = None,
42 created_timestamp_column: Optional[str] = None,
43 field_mapping: Optional[Dict[str, str]] = None,
44 date_partition_column: Optional[str] = None,
45 description: Optional[str] = "",
46 tags: Optional[Dict[str, str]] = None,
47 owner: Optional[str] = "",
48 timestamp_field: Optional[str] = None,
49 ):
50 # If no name, use the table_ref as the default name
51 _name = name
52 if not _name:
53 if table:
54 _name = table
55 else:
56 raise DataSourceNoNameException()
57
58 if date_partition_column:
59 warnings.warn(
60 (
61 "The argument 'date_partition_column' is not supported for Spark sources."
62 "It will be removed in Feast 0.24+"
63 ),
64 DeprecationWarning,
65 )
66
67 super().__init__(
68 name=_name,
69 event_timestamp_column=event_timestamp_column,
70 created_timestamp_column=created_timestamp_column,
71 field_mapping=field_mapping,
72 description=description,
73 tags=tags,
74 owner=owner,
75 timestamp_field=timestamp_field,
76 )
77 warnings.warn(
78 "The spark data source API is an experimental feature in alpha development. "
79 "This API is unstable and it could and most probably will be changed in the future.",
80 RuntimeWarning,
81 )
82 self.spark_options = SparkOptions(
83 table=table,
84 query=query,
85 path=path,
86 file_format=file_format,
87 )
88
89 @property
90 def table(self):
91 """
92 Returns the table of this feature data source
93 """
94 return self.spark_options.table
95
96 @property
97 def query(self):
98 """
99 Returns the query of this feature data source
100 """
101 return self.spark_options.query
102
103 @property
104 def path(self):
105 """
106 Returns the path of the spark data source file.
107 """
108 return self.spark_options.path
109
110 @property
111 def file_format(self):
112 """
113 Returns the file format of this feature data source.
114 """
115 return self.spark_options.file_format
116
117 @staticmethod
118 def from_proto(data_source: DataSourceProto) -> Any:
119 assert data_source.HasField("spark_options")
120 spark_options = SparkOptions.from_proto(data_source.spark_options)
121
122 return SparkSource(
123 name=data_source.name,
124 field_mapping=dict(data_source.field_mapping),
125 table=spark_options.table,
126 query=spark_options.query,
127 path=spark_options.path,
128 file_format=spark_options.file_format,
129 timestamp_field=data_source.timestamp_field,
130 created_timestamp_column=data_source.created_timestamp_column,
131 description=data_source.description,
132 tags=dict(data_source.tags),
133 owner=data_source.owner,
134 )
135
136 def to_proto(self) -> DataSourceProto:
137 data_source_proto = DataSourceProto(
138 name=self.name,
139 type=DataSourceProto.BATCH_SPARK,
140 data_source_class_type="feast.infra.offline_stores.contrib.spark_offline_store.spark_source.SparkSource",
141 field_mapping=self.field_mapping,
142 spark_options=self.spark_options.to_proto(),
143 description=self.description,
144 tags=self.tags,
145 owner=self.owner,
146 )
147
148 data_source_proto.timestamp_field = self.timestamp_field
149 data_source_proto.created_timestamp_column = self.created_timestamp_column
150
151 return data_source_proto
152
153 def validate(self, config: RepoConfig):
154 self.get_table_column_names_and_types(config)
155
156 @staticmethod
157 def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]:
158 return spark_to_feast_value_type
159
160 def get_table_column_names_and_types(
161 self, config: RepoConfig
162 ) -> Iterable[Tuple[str, str]]:
163 from feast.infra.offline_stores.contrib.spark_offline_store.spark import (
164 get_spark_session_or_start_new_with_repoconfig,
165 )
166
167 spark_session = get_spark_session_or_start_new_with_repoconfig(
168 store_config=config.offline_store
169 )
170 df = spark_session.sql(f"SELECT * FROM {self.get_table_query_string()}")
171 return (
172 (fields["name"], fields["type"])
173 for fields in df.schema.jsonValue()["fields"]
174 )
175
176 def get_table_query_string(self) -> str:
177 """Returns a string that can directly be used to reference this table in SQL"""
178 if self.table:
179 # Backticks make sure that spark sql knows this a table reference.
180 return f"`{self.table}`"
181 if self.query:
182 return f"({self.query})"
183
184 # If both the table query string and the actual query are null, we can load from file.
185 spark_session = SparkSession.getActiveSession()
186 if spark_session is None:
187 raise AssertionError("Could not find an active spark session.")
188 try:
189 df = spark_session.read.format(self.file_format).load(self.path)
190 except Exception:
191 logger.exception(
192 "Spark read of file source failed.\n" + traceback.format_exc()
193 )
194 tmp_table_name = get_temp_entity_table_name()
195 df.createOrReplaceTempView(tmp_table_name)
196
197 return f"`{tmp_table_name}`"
198
199
200 class SparkOptions:
201 allowed_formats = [format.value for format in SparkSourceFormat]
202
203 def __init__(
204 self,
205 table: Optional[str],
206 query: Optional[str],
207 path: Optional[str],
208 file_format: Optional[str],
209 ):
210 # Check that only one of the ways to load a spark dataframe can be used. We have
211 # to treat empty string and null the same due to proto (de)serialization.
212 if sum([(not (not arg)) for arg in [table, query, path]]) != 1:
213 raise ValueError(
214 "Exactly one of params(table, query, path) must be specified."
215 )
216 if path:
217 if not file_format:
218 raise ValueError(
219 "If 'path' is specified, then 'file_format' is required."
220 )
221 if file_format not in self.allowed_formats:
222 raise ValueError(
223 f"'file_format' should be one of {self.allowed_formats}"
224 )
225
226 self._table = table
227 self._query = query
228 self._path = path
229 self._file_format = file_format
230
231 @property
232 def table(self):
233 return self._table
234
235 @table.setter
236 def table(self, table):
237 self._table = table
238
239 @property
240 def query(self):
241 return self._query
242
243 @query.setter
244 def query(self, query):
245 self._query = query
246
247 @property
248 def path(self):
249 return self._path
250
251 @path.setter
252 def path(self, path):
253 self._path = path
254
255 @property
256 def file_format(self):
257 return self._file_format
258
259 @file_format.setter
260 def file_format(self, file_format):
261 self._file_format = file_format
262
263 @classmethod
264 def from_proto(cls, spark_options_proto: DataSourceProto.SparkOptions):
265 """
266 Creates a SparkOptions from a protobuf representation of a spark option
267 args:
268 spark_options_proto: a protobuf representation of a datasource
269 Returns:
270 Returns a SparkOptions object based on the spark_options protobuf
271 """
272 spark_options = cls(
273 table=spark_options_proto.table,
274 query=spark_options_proto.query,
275 path=spark_options_proto.path,
276 file_format=spark_options_proto.file_format,
277 )
278
279 return spark_options
280
281 def to_proto(self) -> DataSourceProto.SparkOptions:
282 """
283 Converts an SparkOptionsProto object to its protobuf representation.
284 Returns:
285 SparkOptionsProto protobuf
286 """
287 spark_options_proto = DataSourceProto.SparkOptions(
288 table=self.table,
289 query=self.query,
290 path=self.path,
291 file_format=self.file_format,
292 )
293
294 return spark_options_proto
295
296
297 class SavedDatasetSparkStorage(SavedDatasetStorage):
298 _proto_attr_name = "spark_storage"
299
300 spark_options: SparkOptions
301
302 def __init__(
303 self,
304 table: Optional[str] = None,
305 query: Optional[str] = None,
306 path: Optional[str] = None,
307 file_format: Optional[str] = None,
308 ):
309 self.spark_options = SparkOptions(
310 table=table,
311 query=query,
312 path=path,
313 file_format=file_format,
314 )
315
316 @staticmethod
317 def from_proto(storage_proto: SavedDatasetStorageProto) -> SavedDatasetStorage:
318 spark_options = SparkOptions.from_proto(storage_proto.spark_storage)
319 return SavedDatasetSparkStorage(
320 table=spark_options.table,
321 query=spark_options.query,
322 path=spark_options.path,
323 file_format=spark_options.file_format,
324 )
325
326 def to_proto(self) -> SavedDatasetStorageProto:
327 return SavedDatasetStorageProto(spark_storage=self.spark_options.to_proto())
328
329 def to_data_source(self) -> DataSource:
330 return SparkSource(
331 table=self.spark_options.table,
332 query=self.spark_options.query,
333 path=self.spark_options.path,
334 file_format=self.spark_options.file_format,
335 )
336
[end of sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py
--- a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py
+++ b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py
@@ -177,7 +177,8 @@
"""Returns a string that can directly be used to reference this table in SQL"""
if self.table:
# Backticks make sure that spark sql knows this a table reference.
- return f"`{self.table}`"
+ table = ".".join([f"`{x}`" for x in self.table.split(".")])
+ return table
if self.query:
return f"({self.query})"
| {"golden_diff": "diff --git a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py\n--- a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py\n+++ b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py\n@@ -177,7 +177,8 @@\n \"\"\"Returns a string that can directly be used to reference this table in SQL\"\"\"\n if self.table:\n # Backticks make sure that spark sql knows this a table reference.\n- return f\"`{self.table}`\"\n+ table = \".\".join([f\"`{x}`\" for x in self.table.split(\".\")])\n+ return table\n if self.query:\n return f\"({self.query})\"\n", "issue": "spark source complain about \"table or view not found\" error.\n## Expected Behavior \r\nspark offline store\r\n\r\n## Current Behavior\r\nif set table of SparkSource with pattern \"db.table\"\uff0cfeast will complain about \"table or view not found\" error.\r\n\r\n## Steps to reproduce\r\n1. feast init test_repo\r\n2. modify example.py to use SparkSource, and set table of SparkSource with pattern \"db.table\"\r\n3. configure featue_store.yaml\r\n4. feast apply\r\n\r\n### Specifications\r\n\r\n- Version: 0.20.1\r\n- Platform: ubuntu 18/04\r\n- Subsystem:\r\n\r\n## Possible Solution\r\n\n", "before_files": [{"content": "import logging\nimport traceback\nimport warnings\nfrom enum import Enum\nfrom typing import Any, Callable, Dict, Iterable, Optional, Tuple\n\nfrom pyspark.sql import SparkSession\n\nfrom feast.data_source import DataSource\nfrom feast.errors import DataSourceNoNameException\nfrom feast.infra.offline_stores.offline_utils import get_temp_entity_table_name\nfrom feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto\nfrom feast.protos.feast.core.SavedDataset_pb2 import (\n SavedDatasetStorage as SavedDatasetStorageProto,\n)\nfrom feast.repo_config import RepoConfig\nfrom feast.saved_dataset import SavedDatasetStorage\nfrom feast.type_map import spark_to_feast_value_type\nfrom feast.value_type import ValueType\n\nlogger = logging.getLogger(__name__)\n\n\nclass SparkSourceFormat(Enum):\n csv = \"csv\"\n json = \"json\"\n parquet = \"parquet\"\n delta = \"delta\"\n avro = \"avro\"\n\n\nclass SparkSource(DataSource):\n def __init__(\n self,\n *,\n name: Optional[str] = None,\n table: Optional[str] = None,\n query: Optional[str] = None,\n path: Optional[str] = None,\n file_format: Optional[str] = None,\n event_timestamp_column: Optional[str] = None,\n created_timestamp_column: Optional[str] = None,\n field_mapping: Optional[Dict[str, str]] = None,\n date_partition_column: Optional[str] = None,\n description: Optional[str] = \"\",\n tags: Optional[Dict[str, str]] = None,\n owner: Optional[str] = \"\",\n timestamp_field: Optional[str] = None,\n ):\n # If no name, use the table_ref as the default name\n _name = name\n if not _name:\n if table:\n _name = table\n else:\n raise DataSourceNoNameException()\n\n if date_partition_column:\n warnings.warn(\n (\n \"The argument 'date_partition_column' is not supported for Spark sources.\"\n \"It will be removed in Feast 0.24+\"\n ),\n DeprecationWarning,\n )\n\n super().__init__(\n name=_name,\n event_timestamp_column=event_timestamp_column,\n created_timestamp_column=created_timestamp_column,\n field_mapping=field_mapping,\n description=description,\n tags=tags,\n owner=owner,\n timestamp_field=timestamp_field,\n )\n warnings.warn(\n \"The spark data source API is an experimental feature in 
alpha development. \"\n \"This API is unstable and it could and most probably will be changed in the future.\",\n RuntimeWarning,\n )\n self.spark_options = SparkOptions(\n table=table,\n query=query,\n path=path,\n file_format=file_format,\n )\n\n @property\n def table(self):\n \"\"\"\n Returns the table of this feature data source\n \"\"\"\n return self.spark_options.table\n\n @property\n def query(self):\n \"\"\"\n Returns the query of this feature data source\n \"\"\"\n return self.spark_options.query\n\n @property\n def path(self):\n \"\"\"\n Returns the path of the spark data source file.\n \"\"\"\n return self.spark_options.path\n\n @property\n def file_format(self):\n \"\"\"\n Returns the file format of this feature data source.\n \"\"\"\n return self.spark_options.file_format\n\n @staticmethod\n def from_proto(data_source: DataSourceProto) -> Any:\n assert data_source.HasField(\"spark_options\")\n spark_options = SparkOptions.from_proto(data_source.spark_options)\n\n return SparkSource(\n name=data_source.name,\n field_mapping=dict(data_source.field_mapping),\n table=spark_options.table,\n query=spark_options.query,\n path=spark_options.path,\n file_format=spark_options.file_format,\n timestamp_field=data_source.timestamp_field,\n created_timestamp_column=data_source.created_timestamp_column,\n description=data_source.description,\n tags=dict(data_source.tags),\n owner=data_source.owner,\n )\n\n def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n name=self.name,\n type=DataSourceProto.BATCH_SPARK,\n data_source_class_type=\"feast.infra.offline_stores.contrib.spark_offline_store.spark_source.SparkSource\",\n field_mapping=self.field_mapping,\n spark_options=self.spark_options.to_proto(),\n description=self.description,\n tags=self.tags,\n owner=self.owner,\n )\n\n data_source_proto.timestamp_field = self.timestamp_field\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n\n return data_source_proto\n\n def validate(self, config: RepoConfig):\n self.get_table_column_names_and_types(config)\n\n @staticmethod\n def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]:\n return spark_to_feast_value_type\n\n def get_table_column_names_and_types(\n self, config: RepoConfig\n ) -> Iterable[Tuple[str, str]]:\n from feast.infra.offline_stores.contrib.spark_offline_store.spark import (\n get_spark_session_or_start_new_with_repoconfig,\n )\n\n spark_session = get_spark_session_or_start_new_with_repoconfig(\n store_config=config.offline_store\n )\n df = spark_session.sql(f\"SELECT * FROM {self.get_table_query_string()}\")\n return (\n (fields[\"name\"], fields[\"type\"])\n for fields in df.schema.jsonValue()[\"fields\"]\n )\n\n def get_table_query_string(self) -> str:\n \"\"\"Returns a string that can directly be used to reference this table in SQL\"\"\"\n if self.table:\n # Backticks make sure that spark sql knows this a table reference.\n return f\"`{self.table}`\"\n if self.query:\n return f\"({self.query})\"\n\n # If both the table query string and the actual query are null, we can load from file.\n spark_session = SparkSession.getActiveSession()\n if spark_session is None:\n raise AssertionError(\"Could not find an active spark session.\")\n try:\n df = spark_session.read.format(self.file_format).load(self.path)\n except Exception:\n logger.exception(\n \"Spark read of file source failed.\\n\" + traceback.format_exc()\n )\n tmp_table_name = get_temp_entity_table_name()\n df.createOrReplaceTempView(tmp_table_name)\n\n 
return f\"`{tmp_table_name}`\"\n\n\nclass SparkOptions:\n allowed_formats = [format.value for format in SparkSourceFormat]\n\n def __init__(\n self,\n table: Optional[str],\n query: Optional[str],\n path: Optional[str],\n file_format: Optional[str],\n ):\n # Check that only one of the ways to load a spark dataframe can be used. We have\n # to treat empty string and null the same due to proto (de)serialization.\n if sum([(not (not arg)) for arg in [table, query, path]]) != 1:\n raise ValueError(\n \"Exactly one of params(table, query, path) must be specified.\"\n )\n if path:\n if not file_format:\n raise ValueError(\n \"If 'path' is specified, then 'file_format' is required.\"\n )\n if file_format not in self.allowed_formats:\n raise ValueError(\n f\"'file_format' should be one of {self.allowed_formats}\"\n )\n\n self._table = table\n self._query = query\n self._path = path\n self._file_format = file_format\n\n @property\n def table(self):\n return self._table\n\n @table.setter\n def table(self, table):\n self._table = table\n\n @property\n def query(self):\n return self._query\n\n @query.setter\n def query(self, query):\n self._query = query\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, path):\n self._path = path\n\n @property\n def file_format(self):\n return self._file_format\n\n @file_format.setter\n def file_format(self, file_format):\n self._file_format = file_format\n\n @classmethod\n def from_proto(cls, spark_options_proto: DataSourceProto.SparkOptions):\n \"\"\"\n Creates a SparkOptions from a protobuf representation of a spark option\n args:\n spark_options_proto: a protobuf representation of a datasource\n Returns:\n Returns a SparkOptions object based on the spark_options protobuf\n \"\"\"\n spark_options = cls(\n table=spark_options_proto.table,\n query=spark_options_proto.query,\n path=spark_options_proto.path,\n file_format=spark_options_proto.file_format,\n )\n\n return spark_options\n\n def to_proto(self) -> DataSourceProto.SparkOptions:\n \"\"\"\n Converts an SparkOptionsProto object to its protobuf representation.\n Returns:\n SparkOptionsProto protobuf\n \"\"\"\n spark_options_proto = DataSourceProto.SparkOptions(\n table=self.table,\n query=self.query,\n path=self.path,\n file_format=self.file_format,\n )\n\n return spark_options_proto\n\n\nclass SavedDatasetSparkStorage(SavedDatasetStorage):\n _proto_attr_name = \"spark_storage\"\n\n spark_options: SparkOptions\n\n def __init__(\n self,\n table: Optional[str] = None,\n query: Optional[str] = None,\n path: Optional[str] = None,\n file_format: Optional[str] = None,\n ):\n self.spark_options = SparkOptions(\n table=table,\n query=query,\n path=path,\n file_format=file_format,\n )\n\n @staticmethod\n def from_proto(storage_proto: SavedDatasetStorageProto) -> SavedDatasetStorage:\n spark_options = SparkOptions.from_proto(storage_proto.spark_storage)\n return SavedDatasetSparkStorage(\n table=spark_options.table,\n query=spark_options.query,\n path=spark_options.path,\n file_format=spark_options.file_format,\n )\n\n def to_proto(self) -> SavedDatasetStorageProto:\n return SavedDatasetStorageProto(spark_storage=self.spark_options.to_proto())\n\n def to_data_source(self) -> DataSource:\n return SparkSource(\n table=self.spark_options.table,\n query=self.spark_options.query,\n path=self.spark_options.path,\n file_format=self.spark_options.file_format,\n )\n", "path": "sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py"}]} | 3,794 | 201 |
gh_patches_debug_43133 | rasdani/github-patches | git_diff | learningequality__kolibri-4737 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hashi no hash-y
### Observed behavior
Currently we build Hashi without a build hash. This means that if we upgrade our Hashi functionality in the future, the new build may not cache-bust the old one.
### Expected behavior
New versions of Hashi should cache bust old versions!
### User-facing consequences
Weird, unpredictable Hashi behaviour, depending on previous use.

Not possible to 'complete' an HTML5 App
### Observed behavior
In introducing Hashi, the 15-second insta-points behaviour was removed from HTML5 apps, so it is now impossible to register any progress.
### Expected behavior
What should progress look like for an arbitrary HTML5 app?
### User-facing consequences
Confusion?
</issue>
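
Editor's note: the snippet below is an illustrative sketch, not part of the original issue. One common way to cache-bust a built client asset is to derive the HTTP ETag (or the asset URL itself) from a content-hashed build filename, so that shipping a new build changes the identifier and stale cached copies are re-fetched. The example filename is an assumption.

```python
import hashlib


def etag_for_built_asset(built_filename: str) -> str:
    # A content-hashed filename (e.g. produced by the frontend build) changes
    # whenever the bundle changes, so hashing it yields an ETag that
    # automatically busts caches on upgrade.
    return hashlib.md5(built_filename.encode("utf-8")).hexdigest()


print(etag_for_built_asset("hashi-0a1b2c3d.html"))  # hypothetical build output name
```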
<code>
[start of kolibri/core/content/views.py]
1 import mimetypes
2 import os
3 import zipfile
4
5 from django.core.cache import cache
6 from django.http import Http404
7 from django.http import HttpResponse
8 from django.http.response import FileResponse
9 from django.http.response import HttpResponseNotModified
10 from django.urls import reverse
11 from django.template import loader
12 from django.views.decorators.clickjacking import xframe_options_exempt
13 from django.views.decorators.vary import vary_on_headers
14 from django.views.generic.base import View
15 from le_utils.constants import exercises
16 from six.moves.urllib.parse import urlparse
17 from six.moves.urllib.parse import urlunparse
18
19 from .api import cache_forever
20 from .utils.paths import get_content_storage_file_path
21 from kolibri.utils.conf import OPTIONS
22
23 # Do this to prevent import of broken Windows filetype registry that makes guesstype not work.
24 # https://www.thecodingforums.com/threads/mimetypes-guess_type-broken-in-windows-on-py2-7-and-python-3-x.952693/
25 mimetypes.init([os.path.join(os.path.dirname(__file__), 'constants', 'mime.types')])
26
27
28 def _add_access_control_headers(request, response):
29 response["Access-Control-Allow-Origin"] = "*"
30 response["Access-Control-Allow-Methods"] = "GET, OPTIONS"
31 requested_headers = request.META.get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS", "")
32 if requested_headers:
33 response["Access-Control-Allow-Headers"] = requested_headers
34
35
36 def get_referrer_url(request):
37 if request.META.get('HTTP_REFERER'):
38 # If available use HTTP_REFERER to infer the host as that will give us more
39 # information if Kolibri is behind a proxy.
40 return urlparse(request.META.get('HTTP_REFERER'))
41
42
43 def generate_image_prefix_url(request, zipped_filename):
44 parsed_referrer_url = get_referrer_url(request)
45 # Remove trailing slash
46 zipcontent = reverse(
47 'kolibri:core:zipcontent',
48 kwargs={
49 "zipped_filename": zipped_filename,
50 "embedded_filepath": ''
51 })[:-1]
52 if parsed_referrer_url:
53 # Reconstruct the parsed URL using a blank scheme and host + port(1)
54 zipcontent = urlunparse(('', parsed_referrer_url[1], zipcontent, '', '', ''))
55 return zipcontent.encode()
56
57
58 def get_host(request):
59 parsed_referrer_url = get_referrer_url(request)
60 if parsed_referrer_url:
61 host = urlunparse((parsed_referrer_url[0], parsed_referrer_url[1], '', '', '', ''))
62 else:
63 host = request.build_absolute_uri(OPTIONS['Deployment']['URL_PATH_PREFIX'])
64 return host.strip("/")
65
66
67 def _add_content_security_policy_header(request, response):
68 # restrict CSP to only allow resources to be loaded from the Kolibri host, to prevent info leakage
69 # (e.g. via passing user info out as GET parameters to an attacker's server), or inadvertent data usage
70 host = get_host(request)
71 response["Content-Security-Policy"] = "default-src 'self' 'unsafe-inline' 'unsafe-eval' data: " + host
72
73
74 class ZipContentView(View):
75
76 @xframe_options_exempt
77 def options(self, request, *args, **kwargs):
78 """
79 Handles OPTIONS requests which may be sent as "preflight CORS" requests to check permissions.
80 """
81 response = HttpResponse()
82 _add_access_control_headers(request, response)
83 return response
84
85 @vary_on_headers('X-Requested-With')
86 @cache_forever
87 @xframe_options_exempt
88 def get(self, request, zipped_filename, embedded_filepath):
89 """
90 Handles GET requests and serves a static file from within the zip file.
91 """
92
93 # calculate the local file path to the zip file
94 zipped_path = get_content_storage_file_path(zipped_filename)
95
96 # file size
97 file_size = 0
98
99 # if the zipfile does not exist on disk, return a 404
100 if not os.path.exists(zipped_path):
101 raise Http404('"%(filename)s" does not exist locally' % {'filename': zipped_filename})
102
103 # if client has a cached version, use that (we can safely assume nothing has changed, due to MD5)
104 if request.META.get('HTTP_IF_MODIFIED_SINCE'):
105 return HttpResponseNotModified()
106
107 with zipfile.ZipFile(zipped_path) as zf:
108
109 # if no path, or a directory, is being referenced, look for an index.html file
110 if not embedded_filepath or embedded_filepath.endswith("/"):
111 embedded_filepath += "index.html"
112
113 # get the details about the embedded file, and ensure it exists
114 try:
115 info = zf.getinfo(embedded_filepath)
116 except KeyError:
117 raise Http404('"{}" does not exist inside "{}"'.format(embedded_filepath, zipped_filename))
118
119 if (not request.is_ajax()) and zipped_path.endswith('zip') and (embedded_filepath.endswith('htm') or embedded_filepath.endswith('html')):
120 # Sets up our HTML5 zip file endpoint on Kolibri to serve up a
121 # special template that loads Hashi and then initializes it.
122 # Only do this when the request is not AJAX, as Hashi will fetch
123 # the real HTML file using an AJAX request, and presumably other
124 # dynamic loading of HTML content would also get confused if it
125 # got the special Hashi template back instead!
126 cache_key = 'hashi_bootstrap_html'
127 bootstrap_content = cache.get(cache_key)
128 if bootstrap_content is None:
129 template = loader.get_template('content/hashi.html')
130 bootstrap_content = template.render({}, None)
131 cache.set(cache_key, bootstrap_content)
132 response = HttpResponse(bootstrap_content)
133 _add_access_control_headers(request, response)
134 _add_content_security_policy_header(request, response)
135 return response
136
137 # try to guess the MIME type of the embedded file being referenced
138 content_type = mimetypes.guess_type(embedded_filepath)[0] or 'application/octet-stream'
139
140 if not os.path.splitext(embedded_filepath)[1] == '.json':
141 # generate a streaming response object, pulling data from within the zip file
142 response = FileResponse(zf.open(info), content_type=content_type)
143 file_size = info.file_size
144 else:
145 image_prefix_url = generate_image_prefix_url(request, zipped_filename)
146 # load the stream from json file into memory, replace the path_place_holder.
147 content = zf.open(info).read()
148 str_to_be_replaced = ('$' + exercises.IMG_PLACEHOLDER).encode()
149 content_with_path = content.replace(str_to_be_replaced, image_prefix_url)
150 response = HttpResponse(content_with_path, content_type=content_type)
151 file_size = len(content_with_path)
152
153 # set the content-length header to the size of the embedded file
154 if info.file_size:
155 response["Content-Length"] = file_size
156
157 # ensure the browser knows not to try byte-range requests, as we don't support them here
158 response["Accept-Ranges"] = "none"
159
160 # add headers to ensure AJAX requests will be permitted for these files, even from a null origin
161 _add_access_control_headers(request, response)
162 _add_content_security_policy_header(request, response)
163
164 return response
165
166
167 class DownloadContentView(View):
168
169 def get(self, request, filename, new_filename):
170 """
171 Handles GET requests and serves a static file as an attachment.
172 """
173
174 # calculate the local file path of the file
175 path = get_content_storage_file_path(filename)
176
177 # if the file does not exist on disk, return a 404
178 if not os.path.exists(path):
179 raise Http404('"%(filename)s" does not exist locally' % {'filename': filename})
180
181 # generate a file response
182 response = FileResponse(open(path, 'rb'))
183
184 # set the content-type by guessing from the filename
185 response['Content-Type'] = mimetypes.guess_type(filename)[0]
186
187 # set the content-disposition as attachment to force download
188 response['Content-Disposition'] = 'attachment;'
189
190 # set the content-length to the file size
191 response['Content-Length'] = os.path.getsize(path)
192
193 return response
194
[end of kolibri/core/content/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/core/content/views.py b/kolibri/core/content/views.py
--- a/kolibri/core/content/views.py
+++ b/kolibri/core/content/views.py
@@ -1,15 +1,20 @@
+import hashlib
+import io
import mimetypes
import os
import zipfile
+from django.conf import settings
from django.core.cache import cache
from django.http import Http404
from django.http import HttpResponse
from django.http.response import FileResponse
from django.http.response import HttpResponseNotModified
-from django.urls import reverse
from django.template import loader
+from django.urls import reverse
+from django.utils.decorators import method_decorator
from django.views.decorators.clickjacking import xframe_options_exempt
+from django.views.decorators.http import etag
from django.views.decorators.vary import vary_on_headers
from django.views.generic.base import View
from le_utils.constants import exercises
@@ -24,6 +29,16 @@
# https://www.thecodingforums.com/threads/mimetypes-guess_type-broken-in-windows-on-py2-7-and-python-3-x.952693/
mimetypes.init([os.path.join(os.path.dirname(__file__), 'constants', 'mime.types')])
+HASHI_FILENAME = None
+
+
+def get_hashi_filename():
+ global HASHI_FILENAME
+ if HASHI_FILENAME is None or getattr(settings, 'DEVELOPER_MODE', None):
+ with io.open(os.path.join(os.path.dirname(__file__), './build/hashi_filename'), mode='r', encoding='utf-8') as f:
+ HASHI_FILENAME = f.read().strip()
+ return HASHI_FILENAME
+
def _add_access_control_headers(request, response):
response["Access-Control-Allow-Origin"] = "*"
@@ -71,6 +86,21 @@
response["Content-Security-Policy"] = "default-src 'self' 'unsafe-inline' 'unsafe-eval' data: " + host
+def calculate_zip_content_etag(request, zipped_filename, embedded_filepath):
+ zipped_path = get_content_storage_file_path(zipped_filename)
+
+ # if no path, or a directory, is being referenced, look for an index.html file
+ if not embedded_filepath or embedded_filepath.endswith("/"):
+ embedded_filepath += "index.html"
+
+ # Are we returning the Hashi bootstrap html? In which case the etag should change
+ # along with the built file asset of the Hashi client library.
+ if (not request.is_ajax()) and zipped_path.endswith('zip') and (embedded_filepath.endswith('htm') or embedded_filepath.endswith('html')):
+ return hashlib.md5(get_hashi_filename().encode('utf-8')).hexdigest()
+
+ return hashlib.md5((zipped_filename + embedded_filepath).encode('utf-8')).hexdigest()
+
+
class ZipContentView(View):
@xframe_options_exempt
@@ -85,6 +115,7 @@
@vary_on_headers('X-Requested-With')
@cache_forever
@xframe_options_exempt
+ @method_decorator(etag(calculate_zip_content_etag))
def get(self, request, zipped_filename, embedded_filepath):
"""
Handles GET requests and serves a static file from within the zip file.
@@ -127,7 +158,8 @@
bootstrap_content = cache.get(cache_key)
if bootstrap_content is None:
template = loader.get_template('content/hashi.html')
- bootstrap_content = template.render({}, None)
+ hashi_path = "content/{filename}".format(filename=get_hashi_filename())
+ bootstrap_content = template.render({"hashi_path": hashi_path}, None)
cache.set(cache_key, bootstrap_content)
response = HttpResponse(bootstrap_content)
_add_access_control_headers(request, response)
| {"golden_diff": "diff --git a/kolibri/core/content/views.py b/kolibri/core/content/views.py\n--- a/kolibri/core/content/views.py\n+++ b/kolibri/core/content/views.py\n@@ -1,15 +1,20 @@\n+import hashlib\n+import io\n import mimetypes\n import os\n import zipfile\n \n+from django.conf import settings\n from django.core.cache import cache\n from django.http import Http404\n from django.http import HttpResponse\n from django.http.response import FileResponse\n from django.http.response import HttpResponseNotModified\n-from django.urls import reverse\n from django.template import loader\n+from django.urls import reverse\n+from django.utils.decorators import method_decorator\n from django.views.decorators.clickjacking import xframe_options_exempt\n+from django.views.decorators.http import etag\n from django.views.decorators.vary import vary_on_headers\n from django.views.generic.base import View\n from le_utils.constants import exercises\n@@ -24,6 +29,16 @@\n # https://www.thecodingforums.com/threads/mimetypes-guess_type-broken-in-windows-on-py2-7-and-python-3-x.952693/\n mimetypes.init([os.path.join(os.path.dirname(__file__), 'constants', 'mime.types')])\n \n+HASHI_FILENAME = None\n+\n+\n+def get_hashi_filename():\n+ global HASHI_FILENAME\n+ if HASHI_FILENAME is None or getattr(settings, 'DEVELOPER_MODE', None):\n+ with io.open(os.path.join(os.path.dirname(__file__), './build/hashi_filename'), mode='r', encoding='utf-8') as f:\n+ HASHI_FILENAME = f.read().strip()\n+ return HASHI_FILENAME\n+\n \n def _add_access_control_headers(request, response):\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n@@ -71,6 +86,21 @@\n response[\"Content-Security-Policy\"] = \"default-src 'self' 'unsafe-inline' 'unsafe-eval' data: \" + host\n \n \n+def calculate_zip_content_etag(request, zipped_filename, embedded_filepath):\n+ zipped_path = get_content_storage_file_path(zipped_filename)\n+\n+ # if no path, or a directory, is being referenced, look for an index.html file\n+ if not embedded_filepath or embedded_filepath.endswith(\"/\"):\n+ embedded_filepath += \"index.html\"\n+\n+ # Are we returning the Hashi bootstrap html? In which case the etag should change\n+ # along with the built file asset of the Hashi client library.\n+ if (not request.is_ajax()) and zipped_path.endswith('zip') and (embedded_filepath.endswith('htm') or embedded_filepath.endswith('html')):\n+ return hashlib.md5(get_hashi_filename().encode('utf-8')).hexdigest()\n+\n+ return hashlib.md5((zipped_filename + embedded_filepath).encode('utf-8')).hexdigest()\n+\n+\n class ZipContentView(View):\n \n @xframe_options_exempt\n@@ -85,6 +115,7 @@\n @vary_on_headers('X-Requested-With')\n @cache_forever\n @xframe_options_exempt\n+ @method_decorator(etag(calculate_zip_content_etag))\n def get(self, request, zipped_filename, embedded_filepath):\n \"\"\"\n Handles GET requests and serves a static file from within the zip file.\n@@ -127,7 +158,8 @@\n bootstrap_content = cache.get(cache_key)\n if bootstrap_content is None:\n template = loader.get_template('content/hashi.html')\n- bootstrap_content = template.render({}, None)\n+ hashi_path = \"content/{filename}\".format(filename=get_hashi_filename())\n+ bootstrap_content = template.render({\"hashi_path\": hashi_path}, None)\n cache.set(cache_key, bootstrap_content)\n response = HttpResponse(bootstrap_content)\n _add_access_control_headers(request, response)\n", "issue": "Hashi no hash-y\n### Observed behavior\r\nCurrently we build hashi without a build hash. 
This means that in the future if we upgrade our Hashi functionality, this may not get cache busted.\r\n\r\n### Expected behavior\r\nNew versions of Hashi should cache bust old versions!\r\n\r\n### User-facing consequences\r\nWeird, unpredictable Hashi behaviour, depending on previous use.\nNot possible to 'complete' an HTML5 App\n### Observed behavior\r\nIn introducing Hashi, the 15s insta-points was removed from HTML5 apps. Now impossible to register any progress.\r\n\r\n### Expected behavior\r\nWhat should progress look like for an arbitrary HTML5 app?\r\n\r\n### User-facing consequences\r\nConfusion?\n", "before_files": [{"content": "import mimetypes\nimport os\nimport zipfile\n\nfrom django.core.cache import cache\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http.response import FileResponse\nfrom django.http.response import HttpResponseNotModified\nfrom django.urls import reverse\nfrom django.template import loader\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.decorators.vary import vary_on_headers\nfrom django.views.generic.base import View\nfrom le_utils.constants import exercises\nfrom six.moves.urllib.parse import urlparse\nfrom six.moves.urllib.parse import urlunparse\n\nfrom .api import cache_forever\nfrom .utils.paths import get_content_storage_file_path\nfrom kolibri.utils.conf import OPTIONS\n\n# Do this to prevent import of broken Windows filetype registry that makes guesstype not work.\n# https://www.thecodingforums.com/threads/mimetypes-guess_type-broken-in-windows-on-py2-7-and-python-3-x.952693/\nmimetypes.init([os.path.join(os.path.dirname(__file__), 'constants', 'mime.types')])\n\n\ndef _add_access_control_headers(request, response):\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n response[\"Access-Control-Allow-Methods\"] = \"GET, OPTIONS\"\n requested_headers = request.META.get(\"HTTP_ACCESS_CONTROL_REQUEST_HEADERS\", \"\")\n if requested_headers:\n response[\"Access-Control-Allow-Headers\"] = requested_headers\n\n\ndef get_referrer_url(request):\n if request.META.get('HTTP_REFERER'):\n # If available use HTTP_REFERER to infer the host as that will give us more\n # information if Kolibri is behind a proxy.\n return urlparse(request.META.get('HTTP_REFERER'))\n\n\ndef generate_image_prefix_url(request, zipped_filename):\n parsed_referrer_url = get_referrer_url(request)\n # Remove trailing slash\n zipcontent = reverse(\n 'kolibri:core:zipcontent',\n kwargs={\n \"zipped_filename\": zipped_filename,\n \"embedded_filepath\": ''\n })[:-1]\n if parsed_referrer_url:\n # Reconstruct the parsed URL using a blank scheme and host + port(1)\n zipcontent = urlunparse(('', parsed_referrer_url[1], zipcontent, '', '', ''))\n return zipcontent.encode()\n\n\ndef get_host(request):\n parsed_referrer_url = get_referrer_url(request)\n if parsed_referrer_url:\n host = urlunparse((parsed_referrer_url[0], parsed_referrer_url[1], '', '', '', ''))\n else:\n host = request.build_absolute_uri(OPTIONS['Deployment']['URL_PATH_PREFIX'])\n return host.strip(\"/\")\n\n\ndef _add_content_security_policy_header(request, response):\n # restrict CSP to only allow resources to be loaded from the Kolibri host, to prevent info leakage\n # (e.g. 
via passing user info out as GET parameters to an attacker's server), or inadvertent data usage\n host = get_host(request)\n response[\"Content-Security-Policy\"] = \"default-src 'self' 'unsafe-inline' 'unsafe-eval' data: \" + host\n\n\nclass ZipContentView(View):\n\n @xframe_options_exempt\n def options(self, request, *args, **kwargs):\n \"\"\"\n Handles OPTIONS requests which may be sent as \"preflight CORS\" requests to check permissions.\n \"\"\"\n response = HttpResponse()\n _add_access_control_headers(request, response)\n return response\n\n @vary_on_headers('X-Requested-With')\n @cache_forever\n @xframe_options_exempt\n def get(self, request, zipped_filename, embedded_filepath):\n \"\"\"\n Handles GET requests and serves a static file from within the zip file.\n \"\"\"\n\n # calculate the local file path to the zip file\n zipped_path = get_content_storage_file_path(zipped_filename)\n\n # file size\n file_size = 0\n\n # if the zipfile does not exist on disk, return a 404\n if not os.path.exists(zipped_path):\n raise Http404('\"%(filename)s\" does not exist locally' % {'filename': zipped_filename})\n\n # if client has a cached version, use that (we can safely assume nothing has changed, due to MD5)\n if request.META.get('HTTP_IF_MODIFIED_SINCE'):\n return HttpResponseNotModified()\n\n with zipfile.ZipFile(zipped_path) as zf:\n\n # if no path, or a directory, is being referenced, look for an index.html file\n if not embedded_filepath or embedded_filepath.endswith(\"/\"):\n embedded_filepath += \"index.html\"\n\n # get the details about the embedded file, and ensure it exists\n try:\n info = zf.getinfo(embedded_filepath)\n except KeyError:\n raise Http404('\"{}\" does not exist inside \"{}\"'.format(embedded_filepath, zipped_filename))\n\n if (not request.is_ajax()) and zipped_path.endswith('zip') and (embedded_filepath.endswith('htm') or embedded_filepath.endswith('html')):\n # Sets up our HTML5 zip file endpoint on Kolibri to serve up a\n # special template that loads Hashi and then initializes it.\n # Only do this when the request is not AJAX, as Hashi will fetch\n # the real HTML file using an AJAX request, and presumably other\n # dynamic loading of HTML content would also get confused if it\n # got the special Hashi template back instead!\n cache_key = 'hashi_bootstrap_html'\n bootstrap_content = cache.get(cache_key)\n if bootstrap_content is None:\n template = loader.get_template('content/hashi.html')\n bootstrap_content = template.render({}, None)\n cache.set(cache_key, bootstrap_content)\n response = HttpResponse(bootstrap_content)\n _add_access_control_headers(request, response)\n _add_content_security_policy_header(request, response)\n return response\n\n # try to guess the MIME type of the embedded file being referenced\n content_type = mimetypes.guess_type(embedded_filepath)[0] or 'application/octet-stream'\n\n if not os.path.splitext(embedded_filepath)[1] == '.json':\n # generate a streaming response object, pulling data from within the zip file\n response = FileResponse(zf.open(info), content_type=content_type)\n file_size = info.file_size\n else:\n image_prefix_url = generate_image_prefix_url(request, zipped_filename)\n # load the stream from json file into memory, replace the path_place_holder.\n content = zf.open(info).read()\n str_to_be_replaced = ('$' + exercises.IMG_PLACEHOLDER).encode()\n content_with_path = content.replace(str_to_be_replaced, image_prefix_url)\n response = HttpResponse(content_with_path, content_type=content_type)\n file_size = 
len(content_with_path)\n\n # set the content-length header to the size of the embedded file\n if info.file_size:\n response[\"Content-Length\"] = file_size\n\n # ensure the browser knows not to try byte-range requests, as we don't support them here\n response[\"Accept-Ranges\"] = \"none\"\n\n # add headers to ensure AJAX requests will be permitted for these files, even from a null origin\n _add_access_control_headers(request, response)\n _add_content_security_policy_header(request, response)\n\n return response\n\n\nclass DownloadContentView(View):\n\n def get(self, request, filename, new_filename):\n \"\"\"\n Handles GET requests and serves a static file as an attachment.\n \"\"\"\n\n # calculate the local file path of the file\n path = get_content_storage_file_path(filename)\n\n # if the file does not exist on disk, return a 404\n if not os.path.exists(path):\n raise Http404('\"%(filename)s\" does not exist locally' % {'filename': filename})\n\n # generate a file response\n response = FileResponse(open(path, 'rb'))\n\n # set the content-type by guessing from the filename\n response['Content-Type'] = mimetypes.guess_type(filename)[0]\n\n # set the content-disposition as attachment to force download\n response['Content-Disposition'] = 'attachment;'\n\n # set the content-length to the file size\n response['Content-Length'] = os.path.getsize(path)\n\n return response\n", "path": "kolibri/core/content/views.py"}]} | 2,922 | 827 |
gh_patches_debug_37629 | rasdani/github-patches | git_diff | medtagger__MedTagger-202 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User personal information cannot be changed
## Expected Behavior
The user can change their personal information at any time using the Settings page.
User should only be able to change their first and last name. The E-mail field should be grayed out.
## Actual Behavior
Form fields are filled with user data, but there is no way to change them: the fields cannot be modified and no "Save" button is available.
## Steps to Reproduce the Problem
1. Go to Settings page.
2. Open "User data" section.
3. Done.
## Additional comment
Consider changing the name of the section from "User data" to something else.
</issue>
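
Editor's note: the sketch below is not part of the original issue; it only illustrates the kind of backend endpoint the Settings form would need in order to save changes. It is a standalone Flask-RESTPlus example in the style of the service module shown below; the route, payload field names, and the in-memory store are assumptions, and a real implementation would persist changes through the users repository instead.

```python
from flask import Flask, request
from flask_restplus import Api, Resource

app = Flask(__name__)
api = Api(app)
users_ns = api.namespace('users', 'Users management')

_FAKE_USERS = {1: {'firstName': 'Jane', 'lastName': 'Doe'}}  # stand-in for the real repository


@users_ns.route('/<int:user_id>')
class SetUserInfo(Resource):
    """Update a user's first and last name."""

    def put(self, user_id: int):
        payload = request.json or {}
        user = _FAKE_USERS.get(user_id)
        if user is None:
            return {}, 404
        user['firstName'] = payload.get('firstName', user['firstName'])
        user['lastName'] = payload.get('lastName', user['lastName'])
        return {}, 204
```

The e-mail field would simply be omitted from the accepted payload, which keeps it read-only as the expected behaviour describes.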
<code>
[start of backend/medtagger/api/users/service.py]
1 """Module responsible for defining endpoints for users administration."""
2 from typing import Any
3
4 from flask import request
5 from flask_restplus import Resource
6
7 from medtagger.api import api
8 from medtagger.api.users import serializers
9 from medtagger.api.users.business import get_all_users, set_user_role
10 from medtagger.api.utils import get_current_user
11 from medtagger.api.security import login_required, role_required
12
13 users_ns = api.namespace('users', 'Users management')
14
15
16 @users_ns.route('/')
17 class GetUsers(Resource):
18 """Get all users endpoint."""
19
20 @staticmethod
21 @login_required
22 @role_required('admin')
23 @users_ns.marshal_with(serializers.users_list)
24 @users_ns.doc(security='token')
25 def get() -> Any:
26 """Get all users endpoint."""
27 users = get_all_users()
28 return {'users': users}, 200
29
30
31 @users_ns.route('/<int:user_id>/role')
32 class SetRole(Resource):
33 """Set user's role."""
34
35 @staticmethod
36 @login_required
37 @role_required('admin')
38 @users_ns.doc(security='token')
39 def put(user_id: int) -> Any:
40 """Set user's role."""
41 set_user_role(user_id, request.json['role'])
42 return {}, 204
43
44
45 @users_ns.route('/info')
46 class GetUserInfo(Resource):
47 """Get current user information."""
48
49 @staticmethod
50 @login_required
51 @users_ns.marshal_with(serializers.user)
52 @users_ns.doc(security='token')
53 @users_ns.doc(responses={200: 'Successfully retrieved data.'})
54 def get() -> Any:
55 """Get user info."""
56 user = get_current_user()
57 return user, 200
58
[end of backend/medtagger/api/users/service.py]
[start of backend/medtagger/repositories/users.py]
1 """Module responsible for definition of Users' Repository."""
2 from typing import List, Optional
3
4 from medtagger.database import db_session
5 from medtagger.database.models import User
6
7
8 class UsersRepository(object):
9 """Repository for Users."""
10
11 @staticmethod
12 def add_new_user(new_user: User) -> int:
13 """Add new user.
14
15 :return: id of the new user
16 """
17 with db_session() as session:
18 session.add(new_user)
19 return new_user.id
20
21 @staticmethod
22 def get_all_users() -> List[User]:
23 """Return list of all users."""
24 return User.query.order_by(User.id).all()
25
26 @staticmethod
27 def get_user_by_email(user_email: str) -> Optional[User]:
28 """Get user with given email.
29
30 :return Optional of User
31 """
32 return User.query.filter(User.email == user_email).first()
33
34 @staticmethod
35 def get_user_by_id(user_id: int) -> User:
36 """Get user with given id."""
37 return User.query.filter(User.id == user_id).one()
38
[end of backend/medtagger/repositories/users.py]
[start of backend/medtagger/api/users/business.py]
1 """Module responsible for business logic for users administration."""
2 from typing import List
3
4 from medtagger.database.models import User
5 from medtagger.repositories.users import UsersRepository
6 from medtagger.repositories.roles import RolesRepository
7
8
9 def get_all_users() -> List[User]:
10 """Return list of all users."""
11 return UsersRepository.get_all_users()
12
13
14 def set_user_role(user_id: int, role_name: str) -> None:
15 """Set user's role. Old role is being replaced."""
16 RolesRepository.set_user_role(user_id, role_name)
17
[end of backend/medtagger/api/users/business.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/medtagger/api/users/business.py b/backend/medtagger/api/users/business.py
--- a/backend/medtagger/api/users/business.py
+++ b/backend/medtagger/api/users/business.py
@@ -1,6 +1,9 @@
"""Module responsible for business logic for users administration."""
from typing import List
+from sqlalchemy.orm.exc import NoResultFound
+
+from medtagger.api import InvalidArgumentsException
from medtagger.database.models import User
from medtagger.repositories.users import UsersRepository
from medtagger.repositories.roles import RolesRepository
@@ -14,3 +17,12 @@
def set_user_role(user_id: int, role_name: str) -> None:
"""Set user's role. Old role is being replaced."""
RolesRepository.set_user_role(user_id, role_name)
+
+
+def set_user_info(user_id: int, firstName: str, lastName: str) -> None:
+ """Set user's information."""
+ try:
+ user = UsersRepository.get_user_by_id(user_id)
+ UsersRepository.set_user_info(user, firstName, lastName)
+ except NoResultFound:
+ raise InvalidArgumentsException('User with this id does not exist.')
diff --git a/backend/medtagger/api/users/service.py b/backend/medtagger/api/users/service.py
--- a/backend/medtagger/api/users/service.py
+++ b/backend/medtagger/api/users/service.py
@@ -6,7 +6,7 @@
from medtagger.api import api
from medtagger.api.users import serializers
-from medtagger.api.users.business import get_all_users, set_user_role
+from medtagger.api.users.business import get_all_users, set_user_role, set_user_info
from medtagger.api.utils import get_current_user
from medtagger.api.security import login_required, role_required
@@ -55,3 +55,18 @@
"""Get user info."""
user = get_current_user()
return user, 200
+
+
+@users_ns.route('/<int:user_id>')
+class SetUserInfo(Resource):
+ """Set user's information (first name and last name)."""
+
+ @staticmethod
+ @login_required
+ @users_ns.doc(security='token')
+ def put(user_id: int) -> Any:
+ """Set user info."""
+ if get_current_user().id != user_id:
+ return {}, 403
+ set_user_info(user_id, request.json['firstName'], request.json['lastName'])
+ return {}, 204
diff --git a/backend/medtagger/repositories/users.py b/backend/medtagger/repositories/users.py
--- a/backend/medtagger/repositories/users.py
+++ b/backend/medtagger/repositories/users.py
@@ -35,3 +35,11 @@
def get_user_by_id(user_id: int) -> User:
"""Get user with given id."""
return User.query.filter(User.id == user_id).one()
+
+ @staticmethod
+ def set_user_info(user: User, firstName: str, lastName: str) -> None:
+ """Set user's info."""
+ with db_session() as session:
+ user.first_name = firstName
+ user.last_name = lastName
+ session.add(user)
| {"golden_diff": "diff --git a/backend/medtagger/api/users/business.py b/backend/medtagger/api/users/business.py\n--- a/backend/medtagger/api/users/business.py\n+++ b/backend/medtagger/api/users/business.py\n@@ -1,6 +1,9 @@\n \"\"\"Module responsible for business logic for users administration.\"\"\"\n from typing import List\n \n+from sqlalchemy.orm.exc import NoResultFound\n+\n+from medtagger.api import InvalidArgumentsException\n from medtagger.database.models import User\n from medtagger.repositories.users import UsersRepository\n from medtagger.repositories.roles import RolesRepository\n@@ -14,3 +17,12 @@\n def set_user_role(user_id: int, role_name: str) -> None:\n \"\"\"Set user's role. Old role is being replaced.\"\"\"\n RolesRepository.set_user_role(user_id, role_name)\n+\n+\n+def set_user_info(user_id: int, firstName: str, lastName: str) -> None:\n+ \"\"\"Set user's information.\"\"\"\n+ try:\n+ user = UsersRepository.get_user_by_id(user_id)\n+ UsersRepository.set_user_info(user, firstName, lastName)\n+ except NoResultFound:\n+ raise InvalidArgumentsException('User with this id does not exist.')\ndiff --git a/backend/medtagger/api/users/service.py b/backend/medtagger/api/users/service.py\n--- a/backend/medtagger/api/users/service.py\n+++ b/backend/medtagger/api/users/service.py\n@@ -6,7 +6,7 @@\n \n from medtagger.api import api\n from medtagger.api.users import serializers\n-from medtagger.api.users.business import get_all_users, set_user_role\n+from medtagger.api.users.business import get_all_users, set_user_role, set_user_info\n from medtagger.api.utils import get_current_user\n from medtagger.api.security import login_required, role_required\n \n@@ -55,3 +55,18 @@\n \"\"\"Get user info.\"\"\"\n user = get_current_user()\n return user, 200\n+\n+\n+@users_ns.route('/<int:user_id>')\n+class SetUserInfo(Resource):\n+ \"\"\"Set user's information (first name and last name).\"\"\"\n+\n+ @staticmethod\n+ @login_required\n+ @users_ns.doc(security='token')\n+ def put(user_id: int) -> Any:\n+ \"\"\"Set user info.\"\"\"\n+ if get_current_user().id != user_id:\n+ return {}, 403\n+ set_user_info(user_id, request.json['firstName'], request.json['lastName'])\n+ return {}, 204\ndiff --git a/backend/medtagger/repositories/users.py b/backend/medtagger/repositories/users.py\n--- a/backend/medtagger/repositories/users.py\n+++ b/backend/medtagger/repositories/users.py\n@@ -35,3 +35,11 @@\n def get_user_by_id(user_id: int) -> User:\n \"\"\"Get user with given id.\"\"\"\n return User.query.filter(User.id == user_id).one()\n+\n+ @staticmethod\n+ def set_user_info(user: User, firstName: str, lastName: str) -> None:\n+ \"\"\"Set user's info.\"\"\"\n+ with db_session() as session:\n+ user.first_name = firstName\n+ user.last_name = lastName\n+ session.add(user)\n", "issue": "User personal information cannot be changed\n## Expected Behavior\r\n\r\nUser can change its personal information at any time using Settings page.\r\n\r\nUser should only be able to change their first and last name. The E-mail field should be grayed out.\r\n\r\n## Actual Behavior\r\n\r\nForm fields are filled with user data but there is no way to change them. User cannot modify these fields and there is no \"Save\" button available.\r\n\r\n## Steps to Reproduce the Problem\r\n\r\n 1. Go to Settings page.\r\n 2. Open \"User data\" section.\r\n 3. 
Done.\r\n\r\n## Additional comment\r\n\r\nConsider changing the name of the section from \"User data\" to something else.\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for defining endpoints for users administration.\"\"\"\nfrom typing import Any\n\nfrom flask import request\nfrom flask_restplus import Resource\n\nfrom medtagger.api import api\nfrom medtagger.api.users import serializers\nfrom medtagger.api.users.business import get_all_users, set_user_role\nfrom medtagger.api.utils import get_current_user\nfrom medtagger.api.security import login_required, role_required\n\nusers_ns = api.namespace('users', 'Users management')\n\n\n@users_ns.route('/')\nclass GetUsers(Resource):\n \"\"\"Get all users endpoint.\"\"\"\n\n @staticmethod\n @login_required\n @role_required('admin')\n @users_ns.marshal_with(serializers.users_list)\n @users_ns.doc(security='token')\n def get() -> Any:\n \"\"\"Get all users endpoint.\"\"\"\n users = get_all_users()\n return {'users': users}, 200\n\n\n@users_ns.route('/<int:user_id>/role')\nclass SetRole(Resource):\n \"\"\"Set user's role.\"\"\"\n\n @staticmethod\n @login_required\n @role_required('admin')\n @users_ns.doc(security='token')\n def put(user_id: int) -> Any:\n \"\"\"Set user's role.\"\"\"\n set_user_role(user_id, request.json['role'])\n return {}, 204\n\n\n@users_ns.route('/info')\nclass GetUserInfo(Resource):\n \"\"\"Get current user information.\"\"\"\n\n @staticmethod\n @login_required\n @users_ns.marshal_with(serializers.user)\n @users_ns.doc(security='token')\n @users_ns.doc(responses={200: 'Successfully retrieved data.'})\n def get() -> Any:\n \"\"\"Get user info.\"\"\"\n user = get_current_user()\n return user, 200\n", "path": "backend/medtagger/api/users/service.py"}, {"content": "\"\"\"Module responsible for definition of Users' Repository.\"\"\"\nfrom typing import List, Optional\n\nfrom medtagger.database import db_session\nfrom medtagger.database.models import User\n\n\nclass UsersRepository(object):\n \"\"\"Repository for Users.\"\"\"\n\n @staticmethod\n def add_new_user(new_user: User) -> int:\n \"\"\"Add new user.\n\n :return: id of the new user\n \"\"\"\n with db_session() as session:\n session.add(new_user)\n return new_user.id\n\n @staticmethod\n def get_all_users() -> List[User]:\n \"\"\"Return list of all users.\"\"\"\n return User.query.order_by(User.id).all()\n\n @staticmethod\n def get_user_by_email(user_email: str) -> Optional[User]:\n \"\"\"Get user with given email.\n\n :return Optional of User\n \"\"\"\n return User.query.filter(User.email == user_email).first()\n\n @staticmethod\n def get_user_by_id(user_id: int) -> User:\n \"\"\"Get user with given id.\"\"\"\n return User.query.filter(User.id == user_id).one()\n", "path": "backend/medtagger/repositories/users.py"}, {"content": "\"\"\"Module responsible for business logic for users administration.\"\"\"\nfrom typing import List\n\nfrom medtagger.database.models import User\nfrom medtagger.repositories.users import UsersRepository\nfrom medtagger.repositories.roles import RolesRepository\n\n\ndef get_all_users() -> List[User]:\n \"\"\"Return list of all users.\"\"\"\n return UsersRepository.get_all_users()\n\n\ndef set_user_role(user_id: int, role_name: str) -> None:\n \"\"\"Set user's role. Old role is being replaced.\"\"\"\n RolesRepository.set_user_role(user_id, role_name)\n", "path": "backend/medtagger/api/users/business.py"}]} | 1,658 | 720 |
gh_patches_debug_31433 | rasdani/github-patches | git_diff | Theano__Theano-1047 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cutils fails when cutils_ext already present elsewhere in sys.path
EDIT: Now it works with the c|py linker, but not with the cvm linker.
Theano's cutils module expects to find cutils_ext inside config.compiledir.
In cutils.py, cutils_ext.cutils_ext will be loaded. On an import error, this extension will be built and eventually cmodule.dlimport will re-import cutils_ext.cutils_ext. dlimport will inject config.compiledir into the front of sys.path, so the cutils_ext should always be loaded from config.compiledir.
Normally, this will then allow cutils_ext.cutils_ext to be loaded. The problem is if the cutils_ext package was found elsewhere on sys.path, but not the cutils_ext module (extension) within that package. cutils.py lacks the sys.path injection of dlimport. The consequence is that the import of cutils_ext.cutils_ext in dlimport fails because the reference to the outer cutils_ext (the package) will be the incorrect path!
Needless to say, when a user encounters this bug, it is a pain to solve.
The solution is to ensure that the cutils.py try statement starts with:
`sys.path.insert(0, theano.config.compiledir)`
with the finally doing:
`del sys.path[0]`
</issue>
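For readers skimming the report, here is a minimal sketch (not part of the original issue) of the guard the reporter is asking for; it mirrors what cutils.py already does around its own import at lines 66 and 96-98 of the listing below, and it assumes a Theano installation so that `config.compiledir` is available:

```python
# Sketch of the requested guard: force config.compiledir to the front of
# sys.path for the duration of the import, then always restore sys.path.
import sys

from theano import config

sys.path.insert(0, config.compiledir)
try:
    from cutils_ext.cutils_ext import *  # noqa: F401,F403
finally:
    del sys.path[0]
```

The point of the try/finally is that sys.path is restored even when the import raises, so a failed first import cannot leave the compiledir entry behind.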
<code>
[start of theano/gof/cutils.py]
1 import os
2 import sys
3 from compilelock import get_lock, release_lock
4 from theano import config
5
6 # TODO These two lines may be removed in the future, when we are 100% sure
7 # noone has an old cutils_ext.so lying around anymore.
8 if os.path.exists(os.path.join(config.compiledir, 'cutils_ext.so')):
9 os.remove(os.path.join(config.compiledir, 'cutils_ext.so'))
10
11
12 def compile_cutils():
13 """Do just the compilation of cutils_ext"""
14 code = """
15 #include <Python.h>
16 extern "C"{
17 static PyObject *
18 run_cthunk(PyObject *self, PyObject *args)
19 {
20 PyObject *py_cthunk = NULL;
21 if(!PyArg_ParseTuple(args,"O",&py_cthunk))
22 return NULL;
23
24 if (!PyCObject_Check(py_cthunk)) {
25 PyErr_SetString(PyExc_ValueError,
26 "Argument to run_cthunk must be a PyCObject.");
27 return NULL;
28 }
29 void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);
30 int (*fn)(void*) = (int (*)(void*))(ptr_addr);
31 void* it = PyCObject_GetDesc(py_cthunk);
32 int failure = fn(it);
33
34 return Py_BuildValue("i", failure);
35 }
36
37 static PyMethodDef CutilsExtMethods[] = {
38 {"run_cthunk", run_cthunk, METH_VARARGS|METH_KEYWORDS,
39 "Run a theano cthunk."},
40 {NULL, NULL, 0, NULL} /* Sentinel */
41 };
42
43 PyMODINIT_FUNC
44 initcutils_ext(void)
45 {
46 (void) Py_InitModule("cutils_ext", CutilsExtMethods);
47 }
48 }
49 """
50
51 loc = os.path.join(config.compiledir, 'cutils_ext')
52 if not os.path.exists(loc):
53 os.mkdir(loc)
54
55 args = cmodule.GCC_compiler.compile_args()
56 cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,
57 preargs=args)
58
59 try:
60 # See gh issue #728 for why these lines are here. Summary: compiledir
61 # must be at the beginning of the path to avoid conflicts with any other
62 # cutils_ext modules that might exist. An __init__.py file must be created
63 # for the same reason. Note that these 5 lines may seem redundant (they are
64 # repeated in compile_str()) but if another cutils_ext does exist then it
65 # will be imported and compile_str won't get called at all.
66 sys.path.insert(0, config.compiledir)
67 location = os.path.join(config.compiledir, 'cutils_ext')
68 if not os.path.exists(location):
69 os.mkdir(location)
70 file(os.path.join(location, "__init__.py"), 'w').close()
71
72 try:
73 from cutils_ext.cutils_ext import *
74 except ImportError:
75 import cmodule
76
77 get_lock()
78 # Ensure no-one else is currently modifying the content of the compilation
79 # directory. This is important to prevent multiple processes from trying to
80 # compile the cutils_ext module simultaneously.
81 try:
82 try:
83 # We must retry to import it as some other processs could
84 # have been compiling it between the first failed import
85 # and when we receive the lock
86 from cutils_ext.cutils_ext import *
87 except ImportError:
88 import cmodule
89
90 compile_cutils()
91 from cutils_ext.cutils_ext import *
92
93 finally:
94 # Release lock on compilation directory.
95 release_lock()
96 finally:
97 if sys.path[0] == config.compiledir:
98 del sys.path[0]
99
[end of theano/gof/cutils.py]
[start of theano/gof/lazylinker_c.py]
1 import os, logging, sys
2
3 import theano
4 from theano import config
5 from theano.gof.compilelock import get_lock, release_lock
6 from theano.gof import cmodule
7
8 _logger = logging.getLogger('theano.gof.lazylinker_c')
9
10 force_compile = False
11 version = 0.20 # must match constant returned in function get_version()
12
13 def try_import():
14 global lazylinker_ext
15 sys.path[0:0] = [config.compiledir]
16 import lazylinker_ext
17 del sys.path[0]
18
19 def try_reload():
20 sys.path[0:0] = [config.compiledir]
21 reload(lazylinker_ext)
22 del sys.path[0]
23
24 try:
25 _need_reload = False
26 if force_compile:
27 raise ImportError()
28 else:
29 try_import()
30 _need_reload = True
31 if version != getattr(lazylinker_ext, '_version', None):
32 raise ImportError()
33 except ImportError:
34 get_lock()
35 try:
36 # Maybe someone else already finished compiling it while we were
37 # waiting for the lock?
38 try:
39 if force_compile:
40 raise ImportError()
41 if _need_reload:
42 # The module was successfully imported earlier: we need to
43 # reload it to check if the version was updated.
44 try_reload()
45 else:
46 try_import()
47 _need_reload = True
48 if version != getattr(lazylinker_ext, '_version', None):
49 raise ImportError()
50 except ImportError:
51 _logger.info("Compiling new CVM")
52 dirname = 'lazylinker_ext'
53 # We use a .txt extensions as otherwise it don't get
54 # included when we create a package to send to pypi
55 # This happen even if we tell to include *.c files
56 cfile = os.path.join(theano.__path__[0], 'gof', 'lazylinker_c.c.txt')
57 code = open(cfile).read()
58 loc = os.path.join(config.compiledir, dirname)
59 if not os.path.exists(loc):
60 os.mkdir(loc)
61 args = cmodule.GCC_compiler.compile_args()
62 cmodule.GCC_compiler.compile_str(dirname, code, location=loc,
63 preargs=args)
64 # Save version into the __init__.py file.
65 init_py = os.path.join(loc, '__init__.py')
66 open(init_py, 'w').write('_version = %s\n' % version)
67 # If we just compiled the module for the first time, then it was
68 # imported at the same time: we need to make sure we do not
69 # reload the now outdated __init__.pyc below.
70 init_pyc = os.path.join(loc, '__init__.pyc')
71 if os.path.isfile(init_pyc):
72 os.remove(init_pyc)
73 try_import()
74 try_reload()
75 from lazylinker_ext import lazylinker_ext as lazy_c
76 assert (lazylinker_ext._version ==
77 lazy_c.get_version())
78 _logger.info("New version %s", lazylinker_ext._version)
79 finally:
80 # Release lock on compilation directory.
81 release_lock()
82
83 from lazylinker_ext.lazylinker_ext import *
84 assert force_compile or (version == get_version())
85
[end of theano/gof/lazylinker_c.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/theano/gof/cutils.py b/theano/gof/cutils.py
--- a/theano/gof/cutils.py
+++ b/theano/gof/cutils.py
@@ -67,7 +67,8 @@
location = os.path.join(config.compiledir, 'cutils_ext')
if not os.path.exists(location):
os.mkdir(location)
- file(os.path.join(location, "__init__.py"), 'w').close()
+ if not os.path.exists(os.path.join(location, '__init__.py')):
+ file(os.path.join(location, '__init__.py'), 'w').close()
try:
from cutils_ext.cutils_ext import *
@@ -80,7 +81,7 @@
# compile the cutils_ext module simultaneously.
try:
try:
- # We must retry to import it as some other processs could
+ # We must retry to import it as some other process could
# have been compiling it between the first failed import
# and when we receive the lock
from cutils_ext.cutils_ext import *
diff --git a/theano/gof/lazylinker_c.py b/theano/gof/lazylinker_c.py
--- a/theano/gof/lazylinker_c.py
+++ b/theano/gof/lazylinker_c.py
@@ -22,6 +22,19 @@
del sys.path[0]
try:
+ # See gh issue #728 for why these lines are here. Summary: compiledir must
+ # be at the beginning of the path to avoid conflicts with any other
+ # lazylinker_ext modules that might exist (this step handled in try_import
+ # and try_reload). An __init__.py file must be created for the same reason.
+ # Note that these lines may seem redundant (they are repeated in
+ # compile_str()) but if another lazylinker_ext does exist then it will be
+ # imported and compile_str won't get called at all.
+ location = os.path.join(config.compiledir, 'lazylinker_ext')
+ if not os.path.exists(location):
+ os.mkdir(location)
+ if not os.path.exists(os.path.join(location, '__init__.py')):
+ file(os.path.join(location, '__init__.py'), 'w').close()
+
_need_reload = False
if force_compile:
raise ImportError()
| {"golden_diff": "diff --git a/theano/gof/cutils.py b/theano/gof/cutils.py\n--- a/theano/gof/cutils.py\n+++ b/theano/gof/cutils.py\n@@ -67,7 +67,8 @@\n location = os.path.join(config.compiledir, 'cutils_ext')\n if not os.path.exists(location):\n os.mkdir(location)\n- file(os.path.join(location, \"__init__.py\"), 'w').close()\n+ if not os.path.exists(os.path.join(location, '__init__.py')):\n+ file(os.path.join(location, '__init__.py'), 'w').close()\n \n try:\n from cutils_ext.cutils_ext import *\n@@ -80,7 +81,7 @@\n # compile the cutils_ext module simultaneously.\n try:\n try:\n- # We must retry to import it as some other processs could\n+ # We must retry to import it as some other process could\n # have been compiling it between the first failed import\n # and when we receive the lock\n from cutils_ext.cutils_ext import *\ndiff --git a/theano/gof/lazylinker_c.py b/theano/gof/lazylinker_c.py\n--- a/theano/gof/lazylinker_c.py\n+++ b/theano/gof/lazylinker_c.py\n@@ -22,6 +22,19 @@\n del sys.path[0]\n \n try:\n+ # See gh issue #728 for why these lines are here. Summary: compiledir must\n+ # be at the beginning of the path to avoid conflicts with any other\n+ # lazylinker_ext modules that might exist (this step handled in try_import\n+ # and try_reload). An __init__.py file must be created for the same reason.\n+ # Note that these lines may seem redundant (they are repeated in\n+ # compile_str()) but if another lazylinker_ext does exist then it will be\n+ # imported and compile_str won't get called at all.\n+ location = os.path.join(config.compiledir, 'lazylinker_ext')\n+ if not os.path.exists(location):\n+ os.mkdir(location)\n+ if not os.path.exists(os.path.join(location, '__init__.py')):\n+ file(os.path.join(location, '__init__.py'), 'w').close()\n+\n _need_reload = False\n if force_compile:\n raise ImportError()\n", "issue": "cutils fails when cutils_ext already present elsewhere in sys.path\nEDIT: Now it work with the c|py linker, but not with the cvm linker.\n\nTheano's cutils moudle expects to find cutils_ext inside config.compiledir.\n\nIn cutils.py, cutils_ext.cutils_ext will be loaded. On an import error, this extension will be built and eventually cmodule.dlimport will re-import cutils_ext.cutils_ext. dlimport will inject config.compiledir into the front of sys,path, so the cutils_ext should always be loaded from config.compiledir.\n\nNormally, this will then allow cutils_ext.cutils_ext to be loaded. The problem is if the cutils_ext package was found elsewhere on sys.path, but not the cutils_ext module (extension) within that package. cutils.py lacks the sys.path injection of dlimport. 
The consequence is that the import of cutils_ext.cutils_ext in dlimport fails because the reference to the outer cutils_ext (the package) will be the incorrect path!\n\nNeedless to say, when a user encounters this bug, it is a pain to solve.\n\nThe solution is to ensure that the cutils.py try statement starts with:\n\nsys.path.insert(0,theano.config.compiledir)\nwith the finally doing:\ndel sys.path[0]\n\n", "before_files": [{"content": "import os\nimport sys\nfrom compilelock import get_lock, release_lock\nfrom theano import config\n\n# TODO These two lines may be removed in the future, when we are 100% sure\n# noone has an old cutils_ext.so lying around anymore.\nif os.path.exists(os.path.join(config.compiledir, 'cutils_ext.so')):\n os.remove(os.path.join(config.compiledir, 'cutils_ext.so'))\n\n\ndef compile_cutils():\n \"\"\"Do just the compilation of cutils_ext\"\"\"\n code = \"\"\"\n #include <Python.h>\n extern \"C\"{\n static PyObject *\n run_cthunk(PyObject *self, PyObject *args)\n {\n PyObject *py_cthunk = NULL;\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\n return NULL;\n\n if (!PyCObject_Check(py_cthunk)) {\n PyErr_SetString(PyExc_ValueError,\n \"Argument to run_cthunk must be a PyCObject.\");\n return NULL;\n }\n void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\n void* it = PyCObject_GetDesc(py_cthunk);\n int failure = fn(it);\n\n return Py_BuildValue(\"i\", failure);\n }\n\n static PyMethodDef CutilsExtMethods[] = {\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\n \"Run a theano cthunk.\"},\n {NULL, NULL, 0, NULL} /* Sentinel */\n };\n\n PyMODINIT_FUNC\n initcutils_ext(void)\n {\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\n }\n }\n \"\"\"\n\n loc = os.path.join(config.compiledir, 'cutils_ext')\n if not os.path.exists(loc):\n os.mkdir(loc)\n\n args = cmodule.GCC_compiler.compile_args()\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\n preargs=args)\n\ntry:\n # See gh issue #728 for why these lines are here. Summary: compiledir\n # must be at the beginning of the path to avoid conflicts with any other\n # cutils_ext modules that might exist. An __init__.py file must be created\n # for the same reason. Note that these 5 lines may seem redundant (they are\n # repeated in compile_str()) but if another cutils_ext does exist then it\n # will be imported and compile_str won't get called at all.\n sys.path.insert(0, config.compiledir)\n location = os.path.join(config.compiledir, 'cutils_ext')\n if not os.path.exists(location):\n os.mkdir(location)\n file(os.path.join(location, \"__init__.py\"), 'w').close()\n\n try:\n from cutils_ext.cutils_ext import *\n except ImportError:\n import cmodule\n\n get_lock()\n # Ensure no-one else is currently modifying the content of the compilation\n # directory. 
This is important to prevent multiple processes from trying to\n # compile the cutils_ext module simultaneously.\n try:\n try:\n # We must retry to import it as some other processs could\n # have been compiling it between the first failed import\n # and when we receive the lock\n from cutils_ext.cutils_ext import *\n except ImportError:\n import cmodule\n\n compile_cutils()\n from cutils_ext.cutils_ext import *\n\n finally:\n # Release lock on compilation directory.\n release_lock()\nfinally:\n if sys.path[0] == config.compiledir:\n del sys.path[0]\n", "path": "theano/gof/cutils.py"}, {"content": "import os, logging, sys\n\nimport theano\nfrom theano import config\nfrom theano.gof.compilelock import get_lock, release_lock\nfrom theano.gof import cmodule\n\n_logger = logging.getLogger('theano.gof.lazylinker_c')\n\nforce_compile = False\nversion = 0.20 # must match constant returned in function get_version()\n\ndef try_import():\n global lazylinker_ext\n sys.path[0:0] = [config.compiledir]\n import lazylinker_ext\n del sys.path[0]\n\ndef try_reload():\n sys.path[0:0] = [config.compiledir]\n reload(lazylinker_ext)\n del sys.path[0]\n\ntry:\n _need_reload = False\n if force_compile:\n raise ImportError()\n else:\n try_import()\n _need_reload = True\n if version != getattr(lazylinker_ext, '_version', None):\n raise ImportError()\nexcept ImportError:\n get_lock()\n try:\n # Maybe someone else already finished compiling it while we were\n # waiting for the lock?\n try:\n if force_compile:\n raise ImportError()\n if _need_reload:\n # The module was successfully imported earlier: we need to\n # reload it to check if the version was updated.\n try_reload()\n else:\n try_import()\n _need_reload = True\n if version != getattr(lazylinker_ext, '_version', None):\n raise ImportError()\n except ImportError:\n _logger.info(\"Compiling new CVM\")\n dirname = 'lazylinker_ext'\n # We use a .txt extensions as otherwise it don't get\n # included when we create a package to send to pypi\n # This happen even if we tell to include *.c files\n cfile = os.path.join(theano.__path__[0], 'gof', 'lazylinker_c.c.txt')\n code = open(cfile).read()\n loc = os.path.join(config.compiledir, dirname)\n if not os.path.exists(loc):\n os.mkdir(loc)\n args = cmodule.GCC_compiler.compile_args()\n cmodule.GCC_compiler.compile_str(dirname, code, location=loc,\n preargs=args)\n # Save version into the __init__.py file.\n init_py = os.path.join(loc, '__init__.py')\n open(init_py, 'w').write('_version = %s\\n' % version)\n # If we just compiled the module for the first time, then it was\n # imported at the same time: we need to make sure we do not\n # reload the now outdated __init__.pyc below.\n init_pyc = os.path.join(loc, '__init__.pyc')\n if os.path.isfile(init_pyc):\n os.remove(init_pyc)\n try_import()\n try_reload()\n from lazylinker_ext import lazylinker_ext as lazy_c\n assert (lazylinker_ext._version ==\n lazy_c.get_version())\n _logger.info(\"New version %s\", lazylinker_ext._version)\n finally:\n # Release lock on compilation directory.\n release_lock()\n\nfrom lazylinker_ext.lazylinker_ext import *\nassert force_compile or (version == get_version())\n", "path": "theano/gof/lazylinker_c.py"}]} | 2,698 | 528 |
gh_patches_debug_43873 | rasdani/github-patches | git_diff | Flexget__Flexget-2224 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue 2.7.2: Form plugin
### Expected behaviour:
No error, as when using version 2.5.2
### Actual behaviour:
Got an error while running a task that uses the form plugin
### Steps to reproduce:
Run a task that uses the form plugin
#### Config:
```yaml
avistaz_subs:
form:
url: https://avistaz.to/auth/login
username: "{{secrets.avistaz.usr}}"
password: "{{secrets.avistaz.pwd}}"
userfield: email_username
passfield: password
html:
url: "https://avistaz.to/subtitles?type=2&search=720p&language=0&subtitle=45&page={{i}}"
title_from: url
links_re:
- '\.(rar|r0+[01](/01)|zip|srt)'
increment:
from: 0
to: 1
#dump: result.html
regexp:
accept:
- someregex
#disable: builtins
download: yes
decompress: true
template: [notify_me]
```
#### Log:
Generated new crash log
### Additional information:
- Flexget Version: 2.7.2
- Python Version: 2.7.9
- Installation method: easy_install upgrade flexget
- OS and version: osmc raspberry pi
- Link to crash log: http://pastebin.com/XHL3dzQH
</issue>
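Not part of the original report: the fix shown in the diff further down replaces mechanize with mechanicalsoup, and a condensed, self-contained sketch of that login flow (the URL and field names are placeholders) looks roughly like this:

```python
# Sketch of the form-login flow the fix adopts (mechanicalsoup instead of
# mechanize); URL and field names below are placeholders, not Flexget config.
import mechanicalsoup
import requests


def form_login(url, username, password, userfield="username", passfield="password"):
    session = requests.Session()
    browser = mechanicalsoup.StatefulBrowser(session=session)
    browser.open(url)
    # Try each form on the page until one exposes the expected fields.
    num_forms = len(browser.get_current_page().find_all("form"))
    for form_num in range(num_forms):
        browser.select_form(nr=form_num)
        try:
            browser[userfield] = username
            browser[passfield] = password
            break
        except mechanicalsoup.LinkNotFoundError:
            continue
    else:
        raise RuntimeError("No login form with the expected fields found")
    browser.submit_selected()
    return session  # the session now carries the login cookies
```

Because the browser is built on top of a requests session, whatever cookies the login sets are available to later requests made through that same session, which is how the plugin hands the authenticated state back to the task.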
<code>
[start of flexget/plugins/operate/formlogin.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5 import io
6 import os
7 import socket
8
9 try:
10 import mechanize
11 except ImportError:
12 mechanize = None
13
14 from flexget import plugin
15 from flexget.event import event
16 from flexget.utils.soup import get_soup
17
18 log = logging.getLogger('formlogin')
19
20
21 if mechanize:
22 class SanitizeHandler(mechanize.BaseHandler):
23 def http_response(self, request, response):
24 if not hasattr(response, "seek"):
25 response = mechanize.response_seek_wrapper(response)
26 # Run HTML through BeautifulSoup for sanitizing
27 if 'html' in response.info().get('content-type', ''):
28 soup = get_soup(response.get_data())
29 response.set_data(soup.prettify(encoding=soup.original_encoding))
30 return response
31
32
33 class FormLogin(object):
34 """
35 Login on form
36 """
37
38 schema = {
39 'type': 'object',
40 'properties': {
41 'url': {'type': 'string', 'format': 'url'},
42 'username': {'type': 'string'},
43 'password': {'type': 'string'},
44 'userfield': {'type': 'string'},
45 'passfield': {'type': 'string'}
46 },
47 'required': ['url', 'username', 'password'],
48 'additionalProperties': False
49 }
50
51 def on_task_start(self, task, config):
52 if not mechanize:
53 raise plugin.PluginError('mechanize required (python module), please install it.', log)
54
55 userfield = config.get('userfield', 'username')
56 passfield = config.get('passfield', 'password')
57
58 url = config['url']
59 username = config['username']
60 password = config['password']
61
62 br = mechanize.Browser()
63 br.add_handler(SanitizeHandler())
64 br.set_handle_robots(False)
65 try:
66 br.open(url)
67 except Exception:
68 # TODO: improve error handling
69 log.debug('Exception posting login form.', exc_info=True)
70 raise plugin.PluginError('Unable to post login form', log)
71
72 # br.set_debug_redirects(True)
73 # br.set_debug_responses(True)
74 # br.set_debug_http(True)
75
76 try:
77 for form in br.forms():
78 loginform = form
79
80 try:
81 loginform[userfield] = username
82 loginform[passfield] = password
83 break
84 except Exception:
85 pass
86 else:
87 received = os.path.join(task.manager.config_base, 'received')
88 if not os.path.isdir(received):
89 os.mkdir(received)
90 filename = os.path.join(received, '%s.formlogin.html' % task.name)
91 with io.open(filename, 'wb') as f:
92 f.write(br.response().get_data())
93 log.critical('I have saved the login page content to %s for you to view', filename)
94 raise plugin.PluginError('Unable to find login fields', log)
95 except socket.timeout:
96 raise plugin.PluginError('Timed out on url %s' % url)
97
98 br.form = loginform
99
100 br.submit()
101
102 cookiejar = br._ua_handlers["_cookies"].cookiejar
103
104 # Add cookiejar to our requests session
105 task.requests.add_cookiejar(cookiejar)
106
107
108 @event('plugin.register')
109 def register_plugin():
110 plugin.register(FormLogin, 'form', api_ver=2)
111
[end of flexget/plugins/operate/formlogin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/operate/formlogin.py b/flexget/plugins/operate/formlogin.py
--- a/flexget/plugins/operate/formlogin.py
+++ b/flexget/plugins/operate/formlogin.py
@@ -7,29 +7,17 @@
import socket
try:
- import mechanize
+ import mechanicalsoup
except ImportError:
- mechanize = None
+ mechanicalsoup = None
+import requests
from flexget import plugin
from flexget.event import event
-from flexget.utils.soup import get_soup
log = logging.getLogger('formlogin')
-if mechanize:
- class SanitizeHandler(mechanize.BaseHandler):
- def http_response(self, request, response):
- if not hasattr(response, "seek"):
- response = mechanize.response_seek_wrapper(response)
- # Run HTML through BeautifulSoup for sanitizing
- if 'html' in response.info().get('content-type', ''):
- soup = get_soup(response.get_data())
- response.set_data(soup.prettify(encoding=soup.original_encoding))
- return response
-
-
class FormLogin(object):
"""
Login on form
@@ -49,8 +37,8 @@
}
def on_task_start(self, task, config):
- if not mechanize:
- raise plugin.PluginError('mechanize required (python module), please install it.', log)
+ if not mechanicalsoup:
+ raise plugin.PluginError('mechanicalsoup required (python module), please install it.', log)
userfield = config.get('userfield', 'username')
passfield = config.get('passfield', 'password')
@@ -59,29 +47,28 @@
username = config['username']
password = config['password']
- br = mechanize.Browser()
- br.add_handler(SanitizeHandler())
- br.set_handle_robots(False)
+ br = mechanicalsoup.StatefulBrowser(session=task.requests)
+
try:
- br.open(url)
- except Exception:
+ response = br.open(url)
+ except requests.RequestException:
# TODO: improve error handling
- log.debug('Exception posting login form.', exc_info=True)
- raise plugin.PluginError('Unable to post login form', log)
+ log.debug('Exception getting login page.', exc_info=True)
+ raise plugin.PluginError('Unable to get login page', log)
- # br.set_debug_redirects(True)
- # br.set_debug_responses(True)
- # br.set_debug_http(True)
+ # br.set_debug(True)
+ num_forms = len(br.get_current_page().find_all('form'))
+ if not num_forms:
+ raise plugin.PluginError('Unable to find any forms on {}'.format(url), log)
try:
- for form in br.forms():
- loginform = form
-
+ for form_num in range(num_forms):
+ br.select_form(nr=form_num)
try:
- loginform[userfield] = username
- loginform[passfield] = password
+ br[userfield] = username
+ br[passfield] = password
break
- except Exception:
+ except mechanicalsoup.LinkNotFoundError:
pass
else:
received = os.path.join(task.manager.config_base, 'received')
@@ -89,20 +76,17 @@
os.mkdir(received)
filename = os.path.join(received, '%s.formlogin.html' % task.name)
with io.open(filename, 'wb') as f:
- f.write(br.response().get_data())
+ f.write(response.content)
log.critical('I have saved the login page content to %s for you to view', filename)
raise plugin.PluginError('Unable to find login fields', log)
except socket.timeout:
raise plugin.PluginError('Timed out on url %s' % url)
- br.form = loginform
-
- br.submit()
-
- cookiejar = br._ua_handlers["_cookies"].cookiejar
-
- # Add cookiejar to our requests session
- task.requests.add_cookiejar(cookiejar)
+ try:
+ br.submit_selected()
+ except requests.RequestException:
+ log.debug('Exception submitting login form.', exc_info=True)
+ raise plugin.PluginError('Unable to post login form', log)
@event('plugin.register')
| {"golden_diff": "diff --git a/flexget/plugins/operate/formlogin.py b/flexget/plugins/operate/formlogin.py\n--- a/flexget/plugins/operate/formlogin.py\n+++ b/flexget/plugins/operate/formlogin.py\n@@ -7,29 +7,17 @@\n import socket\n \n try:\n- import mechanize\n+ import mechanicalsoup\n except ImportError:\n- mechanize = None\n+ mechanicalsoup = None\n+import requests\n \n from flexget import plugin\n from flexget.event import event\n-from flexget.utils.soup import get_soup\n \n log = logging.getLogger('formlogin')\n \n \n-if mechanize:\n- class SanitizeHandler(mechanize.BaseHandler):\n- def http_response(self, request, response):\n- if not hasattr(response, \"seek\"):\n- response = mechanize.response_seek_wrapper(response)\n- # Run HTML through BeautifulSoup for sanitizing\n- if 'html' in response.info().get('content-type', ''):\n- soup = get_soup(response.get_data())\n- response.set_data(soup.prettify(encoding=soup.original_encoding))\n- return response\n-\n-\n class FormLogin(object):\n \"\"\"\n Login on form\n@@ -49,8 +37,8 @@\n }\n \n def on_task_start(self, task, config):\n- if not mechanize:\n- raise plugin.PluginError('mechanize required (python module), please install it.', log)\n+ if not mechanicalsoup:\n+ raise plugin.PluginError('mechanicalsoup required (python module), please install it.', log)\n \n userfield = config.get('userfield', 'username')\n passfield = config.get('passfield', 'password')\n@@ -59,29 +47,28 @@\n username = config['username']\n password = config['password']\n \n- br = mechanize.Browser()\n- br.add_handler(SanitizeHandler())\n- br.set_handle_robots(False)\n+ br = mechanicalsoup.StatefulBrowser(session=task.requests)\n+\n try:\n- br.open(url)\n- except Exception:\n+ response = br.open(url)\n+ except requests.RequestException:\n # TODO: improve error handling\n- log.debug('Exception posting login form.', exc_info=True)\n- raise plugin.PluginError('Unable to post login form', log)\n+ log.debug('Exception getting login page.', exc_info=True)\n+ raise plugin.PluginError('Unable to get login page', log)\n \n- # br.set_debug_redirects(True)\n- # br.set_debug_responses(True)\n- # br.set_debug_http(True)\n+ # br.set_debug(True)\n \n+ num_forms = len(br.get_current_page().find_all('form'))\n+ if not num_forms:\n+ raise plugin.PluginError('Unable to find any forms on {}'.format(url), log)\n try:\n- for form in br.forms():\n- loginform = form\n-\n+ for form_num in range(num_forms):\n+ br.select_form(nr=form_num)\n try:\n- loginform[userfield] = username\n- loginform[passfield] = password\n+ br[userfield] = username\n+ br[passfield] = password\n break\n- except Exception:\n+ except mechanicalsoup.LinkNotFoundError:\n pass\n else:\n received = os.path.join(task.manager.config_base, 'received')\n@@ -89,20 +76,17 @@\n os.mkdir(received)\n filename = os.path.join(received, '%s.formlogin.html' % task.name)\n with io.open(filename, 'wb') as f:\n- f.write(br.response().get_data())\n+ f.write(response.content)\n log.critical('I have saved the login page content to %s for you to view', filename)\n raise plugin.PluginError('Unable to find login fields', log)\n except socket.timeout:\n raise plugin.PluginError('Timed out on url %s' % url)\n \n- br.form = loginform\n-\n- br.submit()\n-\n- cookiejar = br._ua_handlers[\"_cookies\"].cookiejar\n-\n- # Add cookiejar to our requests session\n- task.requests.add_cookiejar(cookiejar)\n+ try:\n+ br.submit_selected()\n+ except requests.RequestException:\n+ log.debug('Exception submitting login form.', exc_info=True)\n+ raise 
plugin.PluginError('Unable to post login form', log)\n \n \n @event('plugin.register')\n", "issue": "Issue 2.7.2: Form plugin\n### Expected behaviour:\r\nNo error while using 2.5.2 version\r\n### Actual behaviour:\r\nGot an error while running task that using form plugin\r\n### Steps to reproduce:\r\nRunning task with form plugin\r\n\r\n#### Config:\r\n```yaml\r\n avistaz_subs:\r\n form:\r\n url: https://avistaz.to/auth/login\r\n username: \"{{secrets.avistaz.usr}}\"\r\n password: \"{{secrets.avistaz.pwd}}\"\r\n userfield: email_username\r\n passfield: password\r\n html:\r\n url: \"https://avistaz.to/subtitles?type=2&search=720p&language=0&subtitle=45&page={{i}}\"\r\n title_from: url\r\n links_re:\r\n - '\\.(rar|r0+[01](/01)|zip|srt)'\r\n increment:\r\n from: 0\r\n to: 1\r\n #dump: result.html\r\n regexp:\r\n accept:\r\n - someregex\r\n #disable: builtins\r\n download: yes\r\n decompress: true\r\n template: [notify_me]\r\n```\r\n#### Log:\r\nGenerated new crash log\r\n\r\n### Additional information:\r\n\r\n- Flexget Version: 2.7.2\r\n- Python Version: 2.7.9\r\n- Installation method: easy_install upgrade flexget\r\n- OS and version: osmc raspberry pi\r\n- Link to crash log: http://pastebin.com/XHL3dzQH\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport io\nimport os\nimport socket\n\ntry:\n import mechanize\nexcept ImportError:\n mechanize = None\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('formlogin')\n\n\nif mechanize:\n class SanitizeHandler(mechanize.BaseHandler):\n def http_response(self, request, response):\n if not hasattr(response, \"seek\"):\n response = mechanize.response_seek_wrapper(response)\n # Run HTML through BeautifulSoup for sanitizing\n if 'html' in response.info().get('content-type', ''):\n soup = get_soup(response.get_data())\n response.set_data(soup.prettify(encoding=soup.original_encoding))\n return response\n\n\nclass FormLogin(object):\n \"\"\"\n Login on form\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'url': {'type': 'string', 'format': 'url'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'userfield': {'type': 'string'},\n 'passfield': {'type': 'string'}\n },\n 'required': ['url', 'username', 'password'],\n 'additionalProperties': False\n }\n\n def on_task_start(self, task, config):\n if not mechanize:\n raise plugin.PluginError('mechanize required (python module), please install it.', log)\n\n userfield = config.get('userfield', 'username')\n passfield = config.get('passfield', 'password')\n\n url = config['url']\n username = config['username']\n password = config['password']\n\n br = mechanize.Browser()\n br.add_handler(SanitizeHandler())\n br.set_handle_robots(False)\n try:\n br.open(url)\n except Exception:\n # TODO: improve error handling\n log.debug('Exception posting login form.', exc_info=True)\n raise plugin.PluginError('Unable to post login form', log)\n\n # br.set_debug_redirects(True)\n # br.set_debug_responses(True)\n # br.set_debug_http(True)\n\n try:\n for form in br.forms():\n loginform = form\n\n try:\n loginform[userfield] = username\n loginform[passfield] = password\n break\n except Exception:\n pass\n else:\n received = os.path.join(task.manager.config_base, 'received')\n if not os.path.isdir(received):\n os.mkdir(received)\n filename = os.path.join(received, 
'%s.formlogin.html' % task.name)\n with io.open(filename, 'wb') as f:\n f.write(br.response().get_data())\n log.critical('I have saved the login page content to %s for you to view', filename)\n raise plugin.PluginError('Unable to find login fields', log)\n except socket.timeout:\n raise plugin.PluginError('Timed out on url %s' % url)\n\n br.form = loginform\n\n br.submit()\n\n cookiejar = br._ua_handlers[\"_cookies\"].cookiejar\n\n # Add cookiejar to our requests session\n task.requests.add_cookiejar(cookiejar)\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FormLogin, 'form', api_ver=2)\n", "path": "flexget/plugins/operate/formlogin.py"}]} | 1,839 | 957 |
gh_patches_debug_3597 | rasdani/github-patches | git_diff | bridgecrewio__checkov-489 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checkov crashes when evaluating a Terraform dynamic block in NSGRulePortAccessRestricted.py
**Describe the bug**
When checking azure_security_group_rule, azurerm_network_security_rule or azurerm_network_security_group Terraform resource types, NSGRulePortAccessRestricted.py throws a "TypeError: string indices must be integers" error whenever there's a dynamic block.
**To Reproduce**
Steps to reproduce the behavior:
1. Create a resource in Terraform containing a dynamic security rule:
```
resource "azurerm_network_security_group" "snet_nsgs" {
count = "${length(local.subnets)}"
name = "${local.root}-snet-${lookup(local.subnets[count.index], "name")}-nsg"
location = "${azurerm_resource_group.net_rg.location}"
resource_group_name = "${azurerm_resource_group.net_rg.name}"
tags = "${local.tags}"
dynamic "security_rule" {
for_each = [for s in local.subnets[count.index].nsg_rules : {
name = s.name
priority = s.priority
direction = s.direction
access = s.access
protocol = s.protocol
source_port_range = s.source_port_range
destination_port_range = s.destination_port_range
source_address_prefix = s.source_address_prefix
destination_address_prefix = s.destination_address_prefix
description = s.description
}]
content {
name = security_rule.value.name
priority = security_rule.value.priority
direction = security_rule.value.direction
access = security_rule.value.access
protocol = security_rule.value.protocol
source_port_range = security_rule.value.source_port_range
destination_port_range = security_rule.value.destination_port_range
source_address_prefix = security_rule.value.source_address_prefix
destination_address_prefix = security_rule.value.destination_address_prefix
description = security_rule.value.description
}
}
}
```
2. Run checkov
3. Error!
**Expected behavior**
As checkov cannot evaluate the dynamic block, I expect the check to be skipped without throwing an error.
**Desktop (please complete the following information):**
- OS: Ubuntu
- Checkov Version 1.0.479
</issue>
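Not part of the report itself: the expected "skip" behaviour amounts to short-circuiting before the rule parsing, which is what the fix shown after the code listing does; a minimal sketch of that guard:

```python
from checkov.common.models.enums import CheckResult


def scan_resource_conf(self, conf):
    # Method-body sketch: an unexpanded `dynamic` block survives parsing as a
    # top-level "dynamic" key, so return UNKNOWN instead of crashing on it.
    if "dynamic" in conf:
        return CheckResult.UNKNOWN
    # ... the existing security_rule evaluation would continue here ...
    return CheckResult.PASSED
```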
<code>
[start of checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
3 from checkov.common.util.type_forcers import force_list
4 import re
5
6 INTERNET_ADDRESSES = ["*", "0.0.0.0", "<nw>/0", "/0", "internet", "any"]
7 PORT_RANGE = re.compile('\d+-\d+')
8
9
10 class NSGRulePortAccessRestricted(BaseResourceCheck):
11 def __init__(self, name, check_id, port):
12 supported_resources = ['azure_security_group_rule', 'azurerm_network_security_rule', 'azurerm_network_security_group']
13 categories = [CheckCategories.NETWORKING]
14 super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)
15 self.port = port
16
17 def is_port_in_range(self, conf):
18 ports = force_list(conf['destination_port_range'][0])
19 for range in ports:
20 if re.match(PORT_RANGE, range):
21 start, end = int(range.split('-')[0]), int(range.split('-')[1])
22 if start <= self.port <= end:
23 return True
24 if range in [str(self.port), '*']:
25 return True
26 return False
27
28 def scan_resource_conf(self, conf):
29 rule_confs = [conf]
30 if 'security_rule' in conf:
31 rule_confs = conf['security_rule']
32
33 for rule_conf in rule_confs:
34 if 'access' in rule_conf and rule_conf['access'][0] == "Allow":
35 if 'direction' in rule_conf and rule_conf['direction'][0] == "Inbound":
36 if 'protocol' in rule_conf and rule_conf['protocol'][0] == 'TCP':
37 if 'destination_port_range' in rule_conf and self.is_port_in_range(rule_conf):
38 if 'source_address_prefix' in rule_conf and rule_conf['source_address_prefix'][0] in INTERNET_ADDRESSES:
39 return CheckResult.FAILED
40 return CheckResult.PASSED
41
42
[end of checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py
--- a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py
+++ b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py
@@ -26,6 +26,9 @@
return False
def scan_resource_conf(self, conf):
+ if "dynamic" in conf:
+ return CheckResult.UNKNOWN
+
rule_confs = [conf]
if 'security_rule' in conf:
rule_confs = conf['security_rule']
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py\n--- a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py\n+++ b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py\n@@ -26,6 +26,9 @@\n return False\n \n def scan_resource_conf(self, conf):\n+ if \"dynamic\" in conf:\n+ return CheckResult.UNKNOWN\n+ \n rule_confs = [conf]\n if 'security_rule' in conf:\n rule_confs = conf['security_rule']\n", "issue": "Checkov crashes when evaluating a Terraform dynamic block in NSGRulePortAccessRestricted.py\n**Describe the bug**\r\nWhen checking azure_security_group_rule, azurerm_network_security_rule or azurerm_network_security_group Terraform resource types, NSGRulePortAccessRestricted.py throws a \"TypeError: string indices must be integers\" error whenever there's a dynamic block.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create a resource in terraform, containing a dynamic security rule -\r\n```\r\nresource \"azurerm_network_security_group\" \"snet_nsgs\" {\r\n count = \"${length(local.subnets)}\"\r\n name = \"${local.root}-snet-${lookup(local.subnets[count.index], \"name\")}-nsg\"\r\n location = \"${azurerm_resource_group.net_rg.location}\"\r\n resource_group_name = \"${azurerm_resource_group.net_rg.name}\"\r\n tags = \"${local.tags}\"\r\n\r\n\r\n dynamic \"security_rule\" {\r\n for_each = [for s in local.subnets[count.index].nsg_rules : {\r\n name = s.name\r\n priority = s.priority\r\n direction = s.direction\r\n access = s.access\r\n protocol = s.protocol\r\n source_port_range = s.source_port_range\r\n destination_port_range = s.destination_port_range\r\n source_address_prefix = s.source_address_prefix\r\n destination_address_prefix = s.destination_address_prefix\r\n description = s.description\r\n }]\r\n content {\r\n name = security_rule.value.name\r\n priority = security_rule.value.priority\r\n direction = security_rule.value.direction\r\n access = security_rule.value.access\r\n protocol = security_rule.value.protocol\r\n source_port_range = security_rule.value.source_port_range\r\n destination_port_range = security_rule.value.destination_port_range\r\n source_address_prefix = security_rule.value.source_address_prefix\r\n destination_address_prefix = security_rule.value.destination_address_prefix\r\n description = security_rule.value.description\r\n }\r\n }\r\n}\r\n```\r\n2. Run checkov\r\n3. 
Error!\r\n\r\n**Expected behavior**\r\nAs checkov cannot evaluate the dynamic block, I expect the check to be skipped without throwing an error.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu\r\n - Checkov Version 1.0.479\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\nfrom checkov.common.util.type_forcers import force_list\nimport re\n\nINTERNET_ADDRESSES = [\"*\", \"0.0.0.0\", \"<nw>/0\", \"/0\", \"internet\", \"any\"]\nPORT_RANGE = re.compile('\\d+-\\d+')\n\n\nclass NSGRulePortAccessRestricted(BaseResourceCheck):\n def __init__(self, name, check_id, port):\n supported_resources = ['azure_security_group_rule', 'azurerm_network_security_rule', 'azurerm_network_security_group']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def is_port_in_range(self, conf):\n ports = force_list(conf['destination_port_range'][0])\n for range in ports:\n if re.match(PORT_RANGE, range):\n start, end = int(range.split('-')[0]), int(range.split('-')[1])\n if start <= self.port <= end:\n return True\n if range in [str(self.port), '*']:\n return True\n return False\n\n def scan_resource_conf(self, conf):\n rule_confs = [conf]\n if 'security_rule' in conf:\n rule_confs = conf['security_rule']\n\n for rule_conf in rule_confs:\n if 'access' in rule_conf and rule_conf['access'][0] == \"Allow\":\n if 'direction' in rule_conf and rule_conf['direction'][0] == \"Inbound\":\n if 'protocol' in rule_conf and rule_conf['protocol'][0] == 'TCP':\n if 'destination_port_range' in rule_conf and self.is_port_in_range(rule_conf):\n if 'source_address_prefix' in rule_conf and rule_conf['source_address_prefix'][0] in INTERNET_ADDRESSES:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n", "path": "checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py"}]} | 1,567 | 150 |
gh_patches_debug_13270 | rasdani/github-patches | git_diff | zulip__zulip-28192 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update Plans and pricing icon
We should update the rocket icon everywhere it shows up in the gear menu (Plans and pricing, Plan management) to:
<img width="128" alt="rocket" src="https://github.com/zulip/zulip/assets/2090066/a951bc3b-1d95-4ec5-b5a2-3cf9d1ba88f4">

source: https://lucide.dev/icons/rocket
</issue>
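An illustrative guess at the change (not confirmed by this excerpt): the gear-menu entries in the listing below use `zulip-icon zulip-icon-<name>` classes, so the `plans` entry would presumably move off Font Awesome's `fa fa-rocket` to a Lucide-based class; the exact icon name is an assumption:

```python
# Hypothetical sketch: "zulip-icon-rocket" is assumed to be the name under
# which the Lucide rocket icon would be registered; only the class changes,
# the label and the relative link stay the same.
gear_info = {
    "plans": ['<i class="zulip-icon zulip-icon-rocket"></i> Plans and pricing', "/plans/"],
    "billing": ['<i class="zulip-icon zulip-icon-credit-card"></i> Billing', "/billing/"],
}
```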
<code>
[start of zerver/lib/markdown/help_relative_links.py]
1 import re
2 from typing import Any, List, Match
3
4 from markdown import Markdown
5 from markdown.extensions import Extension
6 from markdown.preprocessors import Preprocessor
7 from typing_extensions import override
8
9 from zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES
10
11 # There is a lot of duplicated code between this file and
12 # help_settings_links.py. So if you're making a change here consider making
13 # it there as well.
14
15 REGEXP = re.compile(r"\{relative\|(?P<link_type>.*?)\|(?P<key>.*?)\}")
16
17 gear_info = {
18 # The pattern is key: [name, link]
19 # key is from REGEXP: `{relative|gear|key}`
20 # name is what the item is called in the gear menu: `Select **name**.`
21 # link is used for relative links: `Select [name](link).`
22 "stream-settings": [
23 '<i class="zulip-icon zulip-icon-hash"></i> Stream settings',
24 "/#streams/subscribed",
25 ],
26 "settings": [
27 '<i class="zulip-icon zulip-icon-tool"></i> Personal Settings',
28 "/#settings/profile",
29 ],
30 "organization-settings": [
31 '<i class="zulip-icon zulip-icon-building"></i> Organization settings',
32 "/#organization/organization-profile",
33 ],
34 "group-settings": [
35 '<i class="zulip-icon zulip-icon-user-cog"></i> Group settings',
36 "/#groups/your",
37 ],
38 "stats": ['<i class="zulip-icon zulip-icon-bar-chart"></i> Usage statistics', "/stats"],
39 "integrations": ['<i class="zulip-icon-git-pull-request"></i> Integrations', "/integrations/"],
40 "plans": ['<i class="fa fa-rocket"></i> Plans and pricing', "/plans/"],
41 "billing": ['<i class="zulip-icon zulip-icon-credit-card"></i> Billing', "/billing/"],
42 "about-zulip": ["About Zulip", "/#about-zulip"],
43 }
44
45 gear_instructions = """
46 1. Click on the **gear** (<i class="zulip-icon zulip-icon-gear"></i>) icon in
47 the upper right corner of the web or desktop app.
48
49 1. Select {item}.
50 """
51
52
53 def gear_handle_match(key: str) -> str:
54 if relative_help_links:
55 item = f"[{gear_info[key][0]}]({gear_info[key][1]})"
56 else:
57 item = f"**{gear_info[key][0]}**"
58 return gear_instructions.format(item=item)
59
60
61 help_info = {
62 # The pattern is key: [name, link]
63 # key is from REGEXP: `{relative|help|key}`
64 # name is what the item is called in the help menu: `Select **name**.`
65 # link is used for relative links: `Select [name](link).`
66 "keyboard-shortcuts": [
67 '<i class="zulip-icon zulip-icon-keyboard"></i> Keyboard shortcuts',
68 "/#keyboard-shortcuts",
69 ],
70 "message-formatting": [
71 '<i class="zulip-icon zulip-icon-edit"></i> Message formatting',
72 "/#message-formatting",
73 ],
74 "search-filters": [
75 '<i class="zulip-icon zulip-icon-manage-search"></i> Search filters',
76 "/#search-operators",
77 ],
78 "about-zulip": [
79 '<i class="zulip-icon zulip-icon-info"></i> About Zulip',
80 "/#about-zulip",
81 ],
82 }
83
84 help_instructions = """
85 1. Click on the **Help menu** (<i class="zulip-icon zulip-icon-help"></i>) icon
86 in the upper right corner of the app.
87
88 1. Select {item}.
89 """
90
91
92 def help_handle_match(key: str) -> str:
93 if relative_help_links:
94 item = f"[{help_info[key][0]}]({help_info[key][1]})"
95 else:
96 item = f"**{help_info[key][0]}**"
97 return help_instructions.format(item=item)
98
99
100 stream_info = {
101 "all": ["All streams", "/#streams/all"],
102 }
103
104 stream_all_instructions = """
105 1. Click on the **gear** (<i class="zulip-icon zulip-icon-gear"></i>) icon in
106 the upper right corner of the web or desktop app.
107
108 1. Select <i class="zulip-icon zulip-icon-hash"></i> **Stream settings**.
109
110 1. Click {item} in the upper left.
111 """
112
113
114 def stream_handle_match(key: str) -> str:
115 if relative_help_links:
116 item = f"[{stream_info[key][0]}]({stream_info[key][1]})"
117 else:
118 item = f"**{stream_info[key][0]}**"
119 return stream_all_instructions.format(item=item)
120
121
122 group_info = {
123 "all": ["All groups", "/#groups/all"],
124 }
125
126 group_all_instructions = """
127 1. Click on the **gear** (<i class="zulip-icon zulip-icon-gear"></i>) icon in
128 the upper right corner of the web or desktop app.
129
130 1. Select <i class="zulip-icon zulip-icon-user-cog"></i> **Group settings**.
131
132 1. Click {item} in the upper left.
133 """
134
135
136 def group_handle_match(key: str) -> str:
137 if relative_help_links:
138 item = f"[{group_info[key][0]}]({group_info[key][1]})"
139 else:
140 item = f"**{group_info[key][0]}**"
141 return group_all_instructions.format(item=item)
142
143
144 draft_instructions = """
145 1. Click on <i class="fa fa-pencil"></i> **Drafts** in the left sidebar.
146 """
147
148 scheduled_instructions = """
149 1. Click on <i class="fa fa-calendar"></i> **Scheduled messages** in the left
150 sidebar. If you do not see this link, you have no scheduled messages.
151 """
152
153 recent_instructions = """
154 1. Click on <i class="fa fa-clock-o"></i> **Recent conversations** in the left
155 sidebar, or use the <kbd>T</kbd> keyboard shortcut..
156 """
157
158 all_instructions = """
159 1. Click on <i class="fa fa-align-left"></i> **All messages** in the left
160 sidebar, or use the <kbd>A</kbd> keyboard shortcut.
161 """
162
163 starred_instructions = """
164 1. Click on <i class="fa fa-star"></i> **Starred messages** in the left
165 sidebar, or by [searching](/help/search-for-messages) for `is:starred`.
166 """
167
168 direct_instructions = """
169 1. In the left sidebar, click the **All direct messages**
170 (<i class="fa fa-align-right"></i>) icon to the right of the
171 **Direct messages** label, or use the <kbd>Shift</kbd> + <kbd>P</kbd>
172 keyboard shortcut.
173 """
174
175 inbox_instructions = """
176 1. Click on <i class="zulip-icon zulip-icon-inbox"></i> **Inbox** in the left
177 sidebar, or use the <kbd>Shift</kbd> + <kbd>I</kbd> keyboard shortcut.
178 """
179
180 message_info = {
181 "drafts": ["Drafts", "/#drafts", draft_instructions],
182 "scheduled": ["Scheduled messages", "/#scheduled", scheduled_instructions],
183 "recent": ["Recent conversations", "/#recent", recent_instructions],
184 "all": ["All messages", "/#all_messages", all_instructions],
185 "starred": ["Starred messages", "/#narrow/is/starred", starred_instructions],
186 "direct": ["All direct messages", "/#narrow/is/dm", direct_instructions],
187 "inbox": ["Inbox", "/#inbox", inbox_instructions],
188 }
189
190
191 def message_handle_match(key: str) -> str:
192 if relative_help_links:
193 return f"1. Go to [{message_info[key][0]}]({message_info[key][1]})."
194 else:
195 return message_info[key][2]
196
197
198 LINK_TYPE_HANDLERS = {
199 "gear": gear_handle_match,
200 "stream": stream_handle_match,
201 "message": message_handle_match,
202 "help": help_handle_match,
203 "group": group_handle_match,
204 }
205
206
207 class RelativeLinksHelpExtension(Extension):
208 @override
209 def extendMarkdown(self, md: Markdown) -> None:
210 """Add RelativeLinksHelpExtension to the Markdown instance."""
211 md.registerExtension(self)
212 md.preprocessors.register(
213 RelativeLinks(), "help_relative_links", PREPROCESSOR_PRIORITES["help_relative_links"]
214 )
215
216
217 relative_help_links: bool = False
218
219
220 def set_relative_help_links(value: bool) -> None:
221 global relative_help_links
222 relative_help_links = value
223
224
225 class RelativeLinks(Preprocessor):
226 @override
227 def run(self, lines: List[str]) -> List[str]:
228 done = False
229 while not done:
230 for line in lines:
231 loc = lines.index(line)
232 match = REGEXP.search(line)
233
234 if match:
235 text = [self.handleMatch(match)]
236 # The line that contains the directive to include the macro
237 # may be preceded or followed by text or tags, in that case
238 # we need to make sure that any preceding or following text
239 # stays the same.
240 line_split = REGEXP.split(line, maxsplit=0)
241 preceding = line_split[0]
242 following = line_split[-1]
243 text = [preceding, *text, following]
244 lines = lines[:loc] + text + lines[loc + 1 :]
245 break
246 else:
247 done = True
248 return lines
249
250 def handleMatch(self, match: Match[str]) -> str:
251 return LINK_TYPE_HANDLERS[match.group("link_type")](match.group("key"))
252
253
254 def makeExtension(*args: Any, **kwargs: Any) -> RelativeLinksHelpExtension:
255 return RelativeLinksHelpExtension(*args, **kwargs)
256
[end of zerver/lib/markdown/help_relative_links.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/lib/markdown/help_relative_links.py b/zerver/lib/markdown/help_relative_links.py
--- a/zerver/lib/markdown/help_relative_links.py
+++ b/zerver/lib/markdown/help_relative_links.py
@@ -37,7 +37,7 @@
],
"stats": ['<i class="zulip-icon zulip-icon-bar-chart"></i> Usage statistics', "/stats"],
"integrations": ['<i class="zulip-icon-git-pull-request"></i> Integrations', "/integrations/"],
- "plans": ['<i class="fa fa-rocket"></i> Plans and pricing', "/plans/"],
+ "plans": ['<i class="zulip-icon zulip-icon-rocket"></i> Plans and pricing', "/plans/"],
"billing": ['<i class="zulip-icon zulip-icon-credit-card"></i> Billing', "/billing/"],
"about-zulip": ["About Zulip", "/#about-zulip"],
}
| {"golden_diff": "diff --git a/zerver/lib/markdown/help_relative_links.py b/zerver/lib/markdown/help_relative_links.py\n--- a/zerver/lib/markdown/help_relative_links.py\n+++ b/zerver/lib/markdown/help_relative_links.py\n@@ -37,7 +37,7 @@\n ],\n \"stats\": ['<i class=\"zulip-icon zulip-icon-bar-chart\"></i> Usage statistics', \"/stats\"],\n \"integrations\": ['<i class=\"zulip-icon-git-pull-request\"></i> Integrations', \"/integrations/\"],\n- \"plans\": ['<i class=\"fa fa-rocket\"></i> Plans and pricing', \"/plans/\"],\n+ \"plans\": ['<i class=\"zulip-icon zulip-icon-rocket\"></i> Plans and pricing', \"/plans/\"],\n \"billing\": ['<i class=\"zulip-icon zulip-icon-credit-card\"></i> Billing', \"/billing/\"],\n \"about-zulip\": [\"About Zulip\", \"/#about-zulip\"],\n }\n", "issue": "Update Plans and pricing icon\nWe should update the rocket icon everywhere it shows up in the gear menu (Plans and pricing, Plan management) to:\r\n\r\n<img width=\"128\" alt=\"rocket\" src=\"https://github.com/zulip/zulip/assets/2090066/a951bc3b-1d95-4ec5-b5a2-3cf9d1ba88f4\">\r\n\r\n\r\n\r\n\r\nsource: https://lucide.dev/icons/rocket\n", "before_files": [{"content": "import re\nfrom typing import Any, List, Match\n\nfrom markdown import Markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing_extensions import override\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\n# There is a lot of duplicated code between this file and\n# help_settings_links.py. So if you're making a change here consider making\n# it there as well.\n\nREGEXP = re.compile(r\"\\{relative\\|(?P<link_type>.*?)\\|(?P<key>.*?)\\}\")\n\ngear_info = {\n # The pattern is key: [name, link]\n # key is from REGEXP: `{relative|gear|key}`\n # name is what the item is called in the gear menu: `Select **name**.`\n # link is used for relative links: `Select [name](link).`\n \"stream-settings\": [\n '<i class=\"zulip-icon zulip-icon-hash\"></i> Stream settings',\n \"/#streams/subscribed\",\n ],\n \"settings\": [\n '<i class=\"zulip-icon zulip-icon-tool\"></i> Personal Settings',\n \"/#settings/profile\",\n ],\n \"organization-settings\": [\n '<i class=\"zulip-icon zulip-icon-building\"></i> Organization settings',\n \"/#organization/organization-profile\",\n ],\n \"group-settings\": [\n '<i class=\"zulip-icon zulip-icon-user-cog\"></i> Group settings',\n \"/#groups/your\",\n ],\n \"stats\": ['<i class=\"zulip-icon zulip-icon-bar-chart\"></i> Usage statistics', \"/stats\"],\n \"integrations\": ['<i class=\"zulip-icon-git-pull-request\"></i> Integrations', \"/integrations/\"],\n \"plans\": ['<i class=\"fa fa-rocket\"></i> Plans and pricing', \"/plans/\"],\n \"billing\": ['<i class=\"zulip-icon zulip-icon-credit-card\"></i> Billing', \"/billing/\"],\n \"about-zulip\": [\"About Zulip\", \"/#about-zulip\"],\n}\n\ngear_instructions = \"\"\"\n1. Click on the **gear** (<i class=\"zulip-icon zulip-icon-gear\"></i>) icon in\n the upper right corner of the web or desktop app.\n\n1. 
Select {item}.\n\"\"\"\n\n\ndef gear_handle_match(key: str) -> str:\n if relative_help_links:\n item = f\"[{gear_info[key][0]}]({gear_info[key][1]})\"\n else:\n item = f\"**{gear_info[key][0]}**\"\n return gear_instructions.format(item=item)\n\n\nhelp_info = {\n # The pattern is key: [name, link]\n # key is from REGEXP: `{relative|help|key}`\n # name is what the item is called in the help menu: `Select **name**.`\n # link is used for relative links: `Select [name](link).`\n \"keyboard-shortcuts\": [\n '<i class=\"zulip-icon zulip-icon-keyboard\"></i> Keyboard shortcuts',\n \"/#keyboard-shortcuts\",\n ],\n \"message-formatting\": [\n '<i class=\"zulip-icon zulip-icon-edit\"></i> Message formatting',\n \"/#message-formatting\",\n ],\n \"search-filters\": [\n '<i class=\"zulip-icon zulip-icon-manage-search\"></i> Search filters',\n \"/#search-operators\",\n ],\n \"about-zulip\": [\n '<i class=\"zulip-icon zulip-icon-info\"></i> About Zulip',\n \"/#about-zulip\",\n ],\n}\n\nhelp_instructions = \"\"\"\n1. Click on the **Help menu** (<i class=\"zulip-icon zulip-icon-help\"></i>) icon\n in the upper right corner of the app.\n\n1. Select {item}.\n\"\"\"\n\n\ndef help_handle_match(key: str) -> str:\n if relative_help_links:\n item = f\"[{help_info[key][0]}]({help_info[key][1]})\"\n else:\n item = f\"**{help_info[key][0]}**\"\n return help_instructions.format(item=item)\n\n\nstream_info = {\n \"all\": [\"All streams\", \"/#streams/all\"],\n}\n\nstream_all_instructions = \"\"\"\n1. Click on the **gear** (<i class=\"zulip-icon zulip-icon-gear\"></i>) icon in\n the upper right corner of the web or desktop app.\n\n1. Select <i class=\"zulip-icon zulip-icon-hash\"></i> **Stream settings**.\n\n1. Click {item} in the upper left.\n\"\"\"\n\n\ndef stream_handle_match(key: str) -> str:\n if relative_help_links:\n item = f\"[{stream_info[key][0]}]({stream_info[key][1]})\"\n else:\n item = f\"**{stream_info[key][0]}**\"\n return stream_all_instructions.format(item=item)\n\n\ngroup_info = {\n \"all\": [\"All groups\", \"/#groups/all\"],\n}\n\ngroup_all_instructions = \"\"\"\n1. Click on the **gear** (<i class=\"zulip-icon zulip-icon-gear\"></i>) icon in\n the upper right corner of the web or desktop app.\n\n1. Select <i class=\"zulip-icon zulip-icon-user-cog\"></i> **Group settings**.\n\n1. Click {item} in the upper left.\n\"\"\"\n\n\ndef group_handle_match(key: str) -> str:\n if relative_help_links:\n item = f\"[{group_info[key][0]}]({group_info[key][1]})\"\n else:\n item = f\"**{group_info[key][0]}**\"\n return group_all_instructions.format(item=item)\n\n\ndraft_instructions = \"\"\"\n1. Click on <i class=\"fa fa-pencil\"></i> **Drafts** in the left sidebar.\n\"\"\"\n\nscheduled_instructions = \"\"\"\n1. Click on <i class=\"fa fa-calendar\"></i> **Scheduled messages** in the left\n sidebar. If you do not see this link, you have no scheduled messages.\n\"\"\"\n\nrecent_instructions = \"\"\"\n1. Click on <i class=\"fa fa-clock-o\"></i> **Recent conversations** in the left\n sidebar, or use the <kbd>T</kbd> keyboard shortcut..\n\"\"\"\n\nall_instructions = \"\"\"\n1. Click on <i class=\"fa fa-align-left\"></i> **All messages** in the left\n sidebar, or use the <kbd>A</kbd> keyboard shortcut.\n\"\"\"\n\nstarred_instructions = \"\"\"\n1. Click on <i class=\"fa fa-star\"></i> **Starred messages** in the left\n sidebar, or by [searching](/help/search-for-messages) for `is:starred`.\n\"\"\"\n\ndirect_instructions = \"\"\"\n1. 
In the left sidebar, click the **All direct messages**\n (<i class=\"fa fa-align-right\"></i>) icon to the right of the\n **Direct messages** label, or use the <kbd>Shift</kbd> + <kbd>P</kbd>\n keyboard shortcut.\n\"\"\"\n\ninbox_instructions = \"\"\"\n1. Click on <i class=\"zulip-icon zulip-icon-inbox\"></i> **Inbox** in the left\n sidebar, or use the <kbd>Shift</kbd> + <kbd>I</kbd> keyboard shortcut.\n\"\"\"\n\nmessage_info = {\n \"drafts\": [\"Drafts\", \"/#drafts\", draft_instructions],\n \"scheduled\": [\"Scheduled messages\", \"/#scheduled\", scheduled_instructions],\n \"recent\": [\"Recent conversations\", \"/#recent\", recent_instructions],\n \"all\": [\"All messages\", \"/#all_messages\", all_instructions],\n \"starred\": [\"Starred messages\", \"/#narrow/is/starred\", starred_instructions],\n \"direct\": [\"All direct messages\", \"/#narrow/is/dm\", direct_instructions],\n \"inbox\": [\"Inbox\", \"/#inbox\", inbox_instructions],\n}\n\n\ndef message_handle_match(key: str) -> str:\n if relative_help_links:\n return f\"1. Go to [{message_info[key][0]}]({message_info[key][1]}).\"\n else:\n return message_info[key][2]\n\n\nLINK_TYPE_HANDLERS = {\n \"gear\": gear_handle_match,\n \"stream\": stream_handle_match,\n \"message\": message_handle_match,\n \"help\": help_handle_match,\n \"group\": group_handle_match,\n}\n\n\nclass RelativeLinksHelpExtension(Extension):\n @override\n def extendMarkdown(self, md: Markdown) -> None:\n \"\"\"Add RelativeLinksHelpExtension to the Markdown instance.\"\"\"\n md.registerExtension(self)\n md.preprocessors.register(\n RelativeLinks(), \"help_relative_links\", PREPROCESSOR_PRIORITES[\"help_relative_links\"]\n )\n\n\nrelative_help_links: bool = False\n\n\ndef set_relative_help_links(value: bool) -> None:\n global relative_help_links\n relative_help_links = value\n\n\nclass RelativeLinks(Preprocessor):\n @override\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if match:\n text = [self.handleMatch(match)]\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding, *text, following]\n lines = lines[:loc] + text + lines[loc + 1 :]\n break\n else:\n done = True\n return lines\n\n def handleMatch(self, match: Match[str]) -> str:\n return LINK_TYPE_HANDLERS[match.group(\"link_type\")](match.group(\"key\"))\n\n\ndef makeExtension(*args: Any, **kwargs: Any) -> RelativeLinksHelpExtension:\n return RelativeLinksHelpExtension(*args, **kwargs)\n", "path": "zerver/lib/markdown/help_relative_links.py"}]} | 3,590 | 225 |
gh_patches_debug_22489 | rasdani/github-patches | git_diff | fossasia__open-event-server-9010 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow to search attendees via email or name
https://test-api.eventyay.com/#attendees-list-attendees-under-an-event-get
</issue>
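As a rough illustration of what such a search could look like on the API side, the sketch below filters attendees with case-insensitive substring matches on name and email. It is only a sketch under assumptions: the attendee model is taken to be `TicketHolder` with `event_id`, `firstname`, `lastname` and `email` columns, and the route decorators (`events_routes.route`, `to_event_id`, `jwt_required`) plus `request`/`jsonify` are assumed to be the ones already imported in the module shown below.

    # Sketch only: case-insensitive attendee search by name or email.
    from app.models.ticket_holder import TicketHolder  # assumed model location

    @events_routes.route('/<string:event_identifier>/attendees/search', methods=['GET'])
    @to_event_id
    @jwt_required
    def search_attendees(event_id):
        query = TicketHolder.query.filter(TicketHolder.event_id == event_id)
        name = request.args.get('name')
        email = request.args.get('email')
        if name:
            query = query.filter(
                TicketHolder.firstname.ilike(f'%{name}%')
                | TicketHolder.lastname.ilike(f'%{name}%')
            )
        if email:
            query = query.filter(TicketHolder.email.ilike(f'%{email}%'))
        attendees = query.order_by(TicketHolder.id.desc()).all()
        # Return plain dicts so the payload is JSON-serialisable without a schema.
        return jsonify(attendees=[
            {'firstname': a.firstname, 'lastname': a.lastname, 'email': a.email}
            for a in attendees
        ])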
<code>
[start of app/api/custom/events.py]
1 from flask import Blueprint, jsonify, render_template, request
2 from flask_jwt_extended import current_user
3 from sqlalchemy import asc, distinct, func, or_
4
5 from app.api.helpers.errors import ForbiddenError, UnprocessableEntityError
6 from app.api.helpers.mail import send_email
7 from app.api.helpers.permissions import is_coorganizer, jwt_required, to_event_id
8 from app.api.helpers.system_mails import MAILS, MailType
9 from app.api.helpers.utilities import group_by, strip_tags
10 from app.api.schema.exhibitors import ExhibitorReorderSchema
11 from app.api.schema.speakers import SpeakerReorderSchema
12 from app.models import db
13 from app.models.discount_code import DiscountCode
14 from app.models.event import Event
15 from app.models.exhibitor import Exhibitor
16 from app.models.session import Session
17 from app.models.speaker import Speaker
18
19 events_routes = Blueprint('events_routes', __name__, url_prefix='/v1/events')
20
21
22 @events_routes.route('/<string:event_identifier>/sessions/dates')
23 @to_event_id
24 def get_dates(event_id):
25 date_list = list(
26 zip(
27 *db.session.query(func.date(Session.starts_at))
28 .distinct()
29 .filter(
30 Session.event_id == event_id,
31 Session.starts_at != None,
32 or_(Session.state == 'accepted', Session.state == 'confirmed'),
33 )
34 .order_by(asc(func.date(Session.starts_at)))
35 .all()
36 )
37 )
38 dates = list(
39 map(
40 str,
41 date_list[0] if date_list else [],
42 )
43 )
44 return jsonify(dates)
45
46
47 @events_routes.route('/<string:event_identifier>/contact-organizer', methods=['POST'])
48 @to_event_id
49 @jwt_required
50 def contact_organizer(event_id):
51 event = Event.query.get_or_404(event_id)
52 organizers_emails = list(
53 set(
54 list(map(lambda x: x.email, event.organizers))
55 + list(map(lambda x: x.email, event.coorganizers))
56 )
57 )
58 email = strip_tags(request.json.get('email'))
59 context = {
60 'attendee_name': current_user.fullname,
61 'attendee_email': current_user.email,
62 'event_name': event.name,
63 'email': email,
64 }
65 organizer_mail = (
66 "{attendee_name} ({attendee_email}) has a question for you about your event {event_name}: <br/><br/>"
67 "<div style='white-space: pre-line;'>{email}</div>"
68 )
69 action = MailType.CONTACT_ORGANIZERS
70 mail = MAILS[action]
71 send_email(
72 to=event.owner.email,
73 action=action,
74 subject=event.name + ": Question from " + current_user.fullname,
75 html=organizer_mail.format(**context),
76 bcc=organizers_emails,
77 reply_to=current_user.email,
78 )
79 send_email(
80 to=current_user.email,
81 action=MailType.CONTACT_ORGANIZERS,
82 subject=event.name + ": Organizers are succesfully contacted",
83 html=render_template(
84 mail['template'],
85 event_name=event.name,
86 email_copy=email,
87 ),
88 )
89 return jsonify(
90 success=True,
91 )
92
93
94 @events_routes.route('/<string:event_identifier>/reorder-speakers', methods=['POST'])
95 @to_event_id
96 @is_coorganizer
97 def reorder_speakers(event_id):
98 if 'reset' in request.args:
99 updates = Speaker.query.filter(Speaker.event_id == event_id).update(
100 {Speaker.order: 0}, synchronize_session=False
101 )
102 db.session.commit()
103
104 return jsonify({'success': True, 'updates': updates})
105
106 data, errors = SpeakerReorderSchema(many=True).load(request.json)
107 if errors:
108 raise UnprocessableEntityError(
109 {'pointer': '/data', 'errors': errors}, 'Data in incorrect format'
110 )
111
112 speaker_ids = {item['speaker'] for item in data}
113 event_ids = (
114 db.session.query(distinct(Speaker.event_id))
115 .filter(Speaker.id.in_(speaker_ids))
116 .all()
117 )
118
119 if len(event_ids) != 1 or event_ids[0][0] != event_id:
120 raise ForbiddenError(
121 {'pointer': 'event_id'},
122 'All speakers should be of single event which user has co-organizer access to',
123 )
124
125 result = group_by(data, 'order')
126 updates = {}
127 for (order, items) in result.items():
128 speaker_ids = {item['speaker'] for item in items}
129 result = Speaker.query.filter(Speaker.id.in_(speaker_ids)).update(
130 {Speaker.order: order}, synchronize_session=False
131 )
132 updates[order] = result
133
134 db.session.commit()
135
136 return jsonify({'success': True, 'updates': updates})
137
138
139 @events_routes.route('/<string:event_identifier>/reorder-exhibitors', methods=['POST'])
140 @to_event_id
141 @is_coorganizer
142 def reorder_exhibitors(event_id):
143 if 'reset' in request.args:
144 updates = Exhibitor.query.filter(Exhibitor.event_id == event_id).update(
145 {Exhibitor.position: 0}, synchronize_session=False
146 )
147 db.session.commit()
148
149 return jsonify({'success': True, 'updates': updates})
150
151 data, errors = ExhibitorReorderSchema(many=True).load(request.json)
152 if errors:
153 raise UnprocessableEntityError(
154 {'pointer': '/data', 'errors': errors}, 'Data in incorrect format'
155 )
156
157 exhibitor_ids = {item['exhibitor'] for item in data}
158 event_ids = (
159 db.session.query(distinct(Exhibitor.event_id))
160 .filter(Exhibitor.id.in_(exhibitor_ids))
161 .all()
162 )
163
164 if len(event_ids) != 1 or event_ids[0][0] != event_id:
165 raise ForbiddenError(
166 {'pointer': 'event_id'},
167 'All exhibitors should be of single event which user has co-organizer access to',
168 )
169
170 result = group_by(data, 'position')
171 updates = {}
172 for (position, items) in result.items():
173 exhibitor_ids = {item['exhibitor'] for item in items}
174 result = Exhibitor.query.filter(Exhibitor.id.in_(exhibitor_ids)).update(
175 {Exhibitor.position: position}, synchronize_session=False
176 )
177 updates[position] = result
178
179 db.session.commit()
180
181 return jsonify({'success': True, 'updates': updates})
182
183
184 @events_routes.route(
185 '/<string:event_identifier>/discount-codes/delete-unused', methods=['DELETE']
186 )
187 @to_event_id
188 @is_coorganizer
189 def delete_unused_discount_codes(event_id):
190 query = DiscountCode.query.filter_by(event_id=event_id, orders=None)
191 result = query.delete(synchronize_session=False)
192
193 db.session.commit()
194
195 return jsonify({'success': True, 'deletes': result})
196
[end of app/api/custom/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/custom/events.py b/app/api/custom/events.py
--- a/app/api/custom/events.py
+++ b/app/api/custom/events.py
@@ -15,6 +15,7 @@
from app.models.exhibitor import Exhibitor
from app.models.session import Session
from app.models.speaker import Speaker
+from app.models.ticket_holder import TicketHolder
events_routes = Blueprint('events_routes', __name__, url_prefix='/v1/events')
@@ -193,3 +194,23 @@
db.session.commit()
return jsonify({'success': True, 'deletes': result})
+
+
+@events_routes.route('/<string:event_identifier>/attendees/search', methods=['GET'])
+@to_event_id
+@jwt_required
+def search_attendees(event_id):
+ """Search attendees by name or email."""
+ query = TicketHolder.query.filter(TicketHolder.event_id == event_id)
+ args = request.args
+ if args.get('name'):
+ query = query.filter(
+ (TicketHolder.firstname.ilike('%' + args.get('name') + '%'))
+ | (TicketHolder.lastname.ilike('%' + args.get('name') + '%'))
+ )
+ if args.get('email'):
+ query = query.filter(TicketHolder.email.ilike('%' + args.get('email') + '%'))
+
+ attendees = query.order_by(TicketHolder.id.desc()).all()
+
+ return jsonify({'attendees': attendees})
| {"golden_diff": "diff --git a/app/api/custom/events.py b/app/api/custom/events.py\n--- a/app/api/custom/events.py\n+++ b/app/api/custom/events.py\n@@ -15,6 +15,7 @@\n from app.models.exhibitor import Exhibitor\n from app.models.session import Session\n from app.models.speaker import Speaker\n+from app.models.ticket_holder import TicketHolder\n \n events_routes = Blueprint('events_routes', __name__, url_prefix='/v1/events')\n \n@@ -193,3 +194,23 @@\n db.session.commit()\n \n return jsonify({'success': True, 'deletes': result})\n+\n+\n+@events_routes.route('/<string:event_identifier>/attendees/search', methods=['GET'])\n+@to_event_id\n+@jwt_required\n+def search_attendees(event_id):\n+ \"\"\"Search attendees by name or email.\"\"\"\n+ query = TicketHolder.query.filter(TicketHolder.event_id == event_id)\n+ args = request.args\n+ if args.get('name'):\n+ query = query.filter(\n+ (TicketHolder.firstname.ilike('%' + args.get('name') + '%'))\n+ | (TicketHolder.lastname.ilike('%' + args.get('name') + '%'))\n+ )\n+ if args.get('email'):\n+ query = query.filter(TicketHolder.email.ilike('%' + args.get('email') + '%'))\n+\n+ attendees = query.order_by(TicketHolder.id.desc()).all()\n+\n+ return jsonify({'attendees': attendees})\n", "issue": "Allow to search attendees via email or name\nhttps://test-api.eventyay.com/#attendees-list-attendees-under-an-event-get\n", "before_files": [{"content": "from flask import Blueprint, jsonify, render_template, request\nfrom flask_jwt_extended import current_user\nfrom sqlalchemy import asc, distinct, func, or_\n\nfrom app.api.helpers.errors import ForbiddenError, UnprocessableEntityError\nfrom app.api.helpers.mail import send_email\nfrom app.api.helpers.permissions import is_coorganizer, jwt_required, to_event_id\nfrom app.api.helpers.system_mails import MAILS, MailType\nfrom app.api.helpers.utilities import group_by, strip_tags\nfrom app.api.schema.exhibitors import ExhibitorReorderSchema\nfrom app.api.schema.speakers import SpeakerReorderSchema\nfrom app.models import db\nfrom app.models.discount_code import DiscountCode\nfrom app.models.event import Event\nfrom app.models.exhibitor import Exhibitor\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\n\nevents_routes = Blueprint('events_routes', __name__, url_prefix='/v1/events')\n\n\n@events_routes.route('/<string:event_identifier>/sessions/dates')\n@to_event_id\ndef get_dates(event_id):\n date_list = list(\n zip(\n *db.session.query(func.date(Session.starts_at))\n .distinct()\n .filter(\n Session.event_id == event_id,\n Session.starts_at != None,\n or_(Session.state == 'accepted', Session.state == 'confirmed'),\n )\n .order_by(asc(func.date(Session.starts_at)))\n .all()\n )\n )\n dates = list(\n map(\n str,\n date_list[0] if date_list else [],\n )\n )\n return jsonify(dates)\n\n\n@events_routes.route('/<string:event_identifier>/contact-organizer', methods=['POST'])\n@to_event_id\n@jwt_required\ndef contact_organizer(event_id):\n event = Event.query.get_or_404(event_id)\n organizers_emails = list(\n set(\n list(map(lambda x: x.email, event.organizers))\n + list(map(lambda x: x.email, event.coorganizers))\n )\n )\n email = strip_tags(request.json.get('email'))\n context = {\n 'attendee_name': current_user.fullname,\n 'attendee_email': current_user.email,\n 'event_name': event.name,\n 'email': email,\n }\n organizer_mail = (\n \"{attendee_name} ({attendee_email}) has a question for you about your event {event_name}: <br/><br/>\"\n \"<div style='white-space: pre-line;'>{email}</div>\"\n 
)\n action = MailType.CONTACT_ORGANIZERS\n mail = MAILS[action]\n send_email(\n to=event.owner.email,\n action=action,\n subject=event.name + \": Question from \" + current_user.fullname,\n html=organizer_mail.format(**context),\n bcc=organizers_emails,\n reply_to=current_user.email,\n )\n send_email(\n to=current_user.email,\n action=MailType.CONTACT_ORGANIZERS,\n subject=event.name + \": Organizers are succesfully contacted\",\n html=render_template(\n mail['template'],\n event_name=event.name,\n email_copy=email,\n ),\n )\n return jsonify(\n success=True,\n )\n\n\n@events_routes.route('/<string:event_identifier>/reorder-speakers', methods=['POST'])\n@to_event_id\n@is_coorganizer\ndef reorder_speakers(event_id):\n if 'reset' in request.args:\n updates = Speaker.query.filter(Speaker.event_id == event_id).update(\n {Speaker.order: 0}, synchronize_session=False\n )\n db.session.commit()\n\n return jsonify({'success': True, 'updates': updates})\n\n data, errors = SpeakerReorderSchema(many=True).load(request.json)\n if errors:\n raise UnprocessableEntityError(\n {'pointer': '/data', 'errors': errors}, 'Data in incorrect format'\n )\n\n speaker_ids = {item['speaker'] for item in data}\n event_ids = (\n db.session.query(distinct(Speaker.event_id))\n .filter(Speaker.id.in_(speaker_ids))\n .all()\n )\n\n if len(event_ids) != 1 or event_ids[0][0] != event_id:\n raise ForbiddenError(\n {'pointer': 'event_id'},\n 'All speakers should be of single event which user has co-organizer access to',\n )\n\n result = group_by(data, 'order')\n updates = {}\n for (order, items) in result.items():\n speaker_ids = {item['speaker'] for item in items}\n result = Speaker.query.filter(Speaker.id.in_(speaker_ids)).update(\n {Speaker.order: order}, synchronize_session=False\n )\n updates[order] = result\n\n db.session.commit()\n\n return jsonify({'success': True, 'updates': updates})\n\n\n@events_routes.route('/<string:event_identifier>/reorder-exhibitors', methods=['POST'])\n@to_event_id\n@is_coorganizer\ndef reorder_exhibitors(event_id):\n if 'reset' in request.args:\n updates = Exhibitor.query.filter(Exhibitor.event_id == event_id).update(\n {Exhibitor.position: 0}, synchronize_session=False\n )\n db.session.commit()\n\n return jsonify({'success': True, 'updates': updates})\n\n data, errors = ExhibitorReorderSchema(many=True).load(request.json)\n if errors:\n raise UnprocessableEntityError(\n {'pointer': '/data', 'errors': errors}, 'Data in incorrect format'\n )\n\n exhibitor_ids = {item['exhibitor'] for item in data}\n event_ids = (\n db.session.query(distinct(Exhibitor.event_id))\n .filter(Exhibitor.id.in_(exhibitor_ids))\n .all()\n )\n\n if len(event_ids) != 1 or event_ids[0][0] != event_id:\n raise ForbiddenError(\n {'pointer': 'event_id'},\n 'All exhibitors should be of single event which user has co-organizer access to',\n )\n\n result = group_by(data, 'position')\n updates = {}\n for (position, items) in result.items():\n exhibitor_ids = {item['exhibitor'] for item in items}\n result = Exhibitor.query.filter(Exhibitor.id.in_(exhibitor_ids)).update(\n {Exhibitor.position: position}, synchronize_session=False\n )\n updates[position] = result\n\n db.session.commit()\n\n return jsonify({'success': True, 'updates': updates})\n\n\n@events_routes.route(\n '/<string:event_identifier>/discount-codes/delete-unused', methods=['DELETE']\n)\n@to_event_id\n@is_coorganizer\ndef delete_unused_discount_codes(event_id):\n query = DiscountCode.query.filter_by(event_id=event_id, orders=None)\n result = 
query.delete(synchronize_session=False)\n\n db.session.commit()\n\n return jsonify({'success': True, 'deletes': result})\n", "path": "app/api/custom/events.py"}]} | 2,534 | 323 |
gh_patches_debug_3022 | rasdani/github-patches | git_diff | conan-io__conan-2763 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] Linter "Unable to import" warning when importing a shared Python Conan package in the build() step
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
---
I followed the instructions on http://docs.conan.io/en/latest/howtos/python_code_reuse.html. When I get to the "Requiring a python conan package" step, the linter gives me a warning about importing the shared package:
$ git clone https://github.com/smokris/conan-test-library
$ cd conan-test-library
$ conan export . me/testing
$ cd ..
$ git clone https://github.com/smokris/conan-test-consumer
$ cd conan-test-consumer
$ conan create . me/testing
HelloPyReuse/1.0@me/testing: Exporting package recipe
Linter warnings
WARN: Linter. Line 9: Unable to import 'hello'
…
HelloPyReuse/1.0@me/testing: Calling build()
Hello World from Python!
…
(The imported package works fine; the problem is just that the linter is emitting a warning. I'd prefer that the linter not show this false-positive warning, to improve the linter's signal-to-noise ratio.)
I'm able to reproduce this using:
- Conan 1.1.1 on my local macOS 10.13.3 system
- Conan 1.1.1 on Travis CI's Mac OS 10.10.5 image
- Conan 1.1.1 on Travis CI's Ubuntu 14.04.5 image
- Conan 1.2.0 on CentOS 7.4
</issue>
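The warning comes from pylint, which cannot resolve an import that Conan only makes available at recipe load/build time, so the usual remedy is to teach the recipe linter to drop that specific class of message. A sketch of one possible heuristic follows; the column-based check is an assumption for illustration (module-level imports are reported near column 0, while a shared Conan python package is typically imported inside build()/source() and therefore at an indented column).

    def _is_probable_conan_python_import(msg):
        # Heuristic sketch: pylint reports "Unable to import ..." as `import-error`.
        # An import-error raised for an indented import statement is most likely a
        # Conan python package imported inside a recipe method, not a real problem.
        return msg.get("symbol") == "import-error" and msg.get("column", 0) > 3

    # Inside the existing _accept_message(msg) filter this would translate to:
    #     if _is_probable_conan_python_import(msg):
    #         return False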
<code>
[start of conans/client/cmd/export_linter.py]
1 import json
2 import os
3 import sys
4
5 import platform
6
7 from conans.client.output import Color
8 from conans.errors import ConanException
9 from subprocess import PIPE, Popen
10 from conans import __path__ as root_path
11
12
13 def conan_linter(conanfile_path, out):
14 if getattr(sys, 'frozen', False):
15 out.info("No linter available. Use a pip installed conan for recipe linting")
16 return
17 apply_lint = os.environ.get("CONAN_RECIPE_LINTER", True)
18 if not apply_lint or apply_lint == "False":
19 return
20
21 dir_path = os.path.dirname(root_path[0]).replace("\\", "/")
22 dirname = os.path.dirname(conanfile_path).replace("\\", "/")
23 hook = '--init-hook="import sys;sys.path.extend([\'%s\', \'%s\'])"' % (dirname, dir_path)
24
25 try:
26 py3_msgs = None
27 msgs, py3_msgs = _normal_linter(conanfile_path, hook)
28 except Exception as e:
29 out.warn("Failed pylint: %s" % e)
30 else:
31 if py3_msgs:
32 out.writeln("Python 3 incompatibilities\n ERROR: %s"
33 % "\n ERROR: ".join(py3_msgs),
34 front=Color.BRIGHT_MAGENTA)
35 if msgs:
36 out.writeln("Linter warnings\n WARN: %s" % "\n WARN: ".join(msgs),
37 front=Color.MAGENTA)
38 pylint_werr = os.environ.get("CONAN_PYLINT_WERR", None)
39 if pylint_werr and (py3_msgs or msgs):
40 raise ConanException("Package recipe has linter errors. Please fix them.")
41
42
43 def _runner(args):
44 command = ["pylint", "--output-format=json"] + args
45 command = " ".join(command)
46 shell = True if platform.system() != "Windows" else False
47 proc = Popen(command, shell=shell, bufsize=10, stdout=PIPE, stderr=PIPE)
48 stdout, _ = proc.communicate()
49 return json.loads(stdout.decode("utf-8")) if stdout else {}
50
51
52 def _normal_linter(conanfile_path, hook):
53 args = ['--py3k', "--enable=all", "--reports=no", "--disable=no-absolute-import", "--persistent=no",
54 hook, '"%s"' % conanfile_path]
55 pylintrc = os.environ.get("CONAN_PYLINTRC", None)
56 if pylintrc:
57 if not os.path.exists(pylintrc):
58 raise ConanException("File %s defined by PYLINTRC doesn't exist" % pylintrc)
59 args.append('--rcfile="%s"' % pylintrc)
60
61 output_json = _runner(args)
62 dynamic_fields = ("source_folder", "build_folder", "package_folder", "info_build",
63 "build_requires", "info")
64
65 def _accept_message(msg):
66 symbol = msg.get("symbol")
67 text = msg.get("message")
68
69 if symbol == "no-member":
70 for field in dynamic_fields:
71 if field in text:
72 return False
73 if symbol == "not-callable" and "self.copy is not callable" == text:
74 return False
75 if symbol == "not-callable" and "self.copy_deps is not callable" == text:
76 return False
77 if symbol in ("bare-except", "broad-except"): # No exception type(s) specified
78 return False
79
80 return True
81
82 result = []
83 py3msgs = []
84 for msg in output_json:
85 if msg.get("type") in ("warning", "error"):
86 message_id = msg.get("symbol")
87 if message_id in ("print-statement", "dict-iter-method"):
88 py3msgs.append("Py3 incompatibility. Line %s: %s"
89 % (msg.get("line"), msg.get("message")))
90 elif _accept_message(msg):
91 result.append("Linter. Line %s: %s" % (msg.get("line"), msg.get("message")))
92
93 return result, py3msgs
94
[end of conans/client/cmd/export_linter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/client/cmd/export_linter.py b/conans/client/cmd/export_linter.py
--- a/conans/client/cmd/export_linter.py
+++ b/conans/client/cmd/export_linter.py
@@ -76,6 +76,8 @@
return False
if symbol in ("bare-except", "broad-except"): # No exception type(s) specified
return False
+ if symbol == "import-error" and msg.get("column") > 3: # Import of a conan python package
+ return False
return True
| {"golden_diff": "diff --git a/conans/client/cmd/export_linter.py b/conans/client/cmd/export_linter.py\n--- a/conans/client/cmd/export_linter.py\n+++ b/conans/client/cmd/export_linter.py\n@@ -76,6 +76,8 @@\n return False\n if symbol in (\"bare-except\", \"broad-except\"): # No exception type(s) specified\n return False\n+ if symbol == \"import-error\" and msg.get(\"column\") > 3: # Import of a conan python package\n+ return False\n \n return True\n", "issue": "[bug] Linter \"Unable to import\" warning when importing a shared Python Conan package in the build() step\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n---\r\n\r\nI followed the instructions on http://docs.conan.io/en/latest/howtos/python_code_reuse.html. When I get to the \"Requiring a python conan package\" step, the linter gives me a warning about importing the shared package:\r\n\r\n $ git clone https://github.com/smokris/conan-test-library\r\n $ cd conan-test-library\r\n $ conan export . me/testing\r\n $ cd ..\r\n\r\n $ git clone https://github.com/smokris/conan-test-consumer\r\n $ cd conan-test-consumer\r\n $ conan create . me/testing\r\n HelloPyReuse/1.0@me/testing: Exporting package recipe\r\n Linter warnings\r\n WARN: Linter. Line 9: Unable to import 'hello'\r\n \u2026\r\n HelloPyReuse/1.0@me/testing: Calling build()\r\n Hello World from Python!\r\n \u2026\r\n\r\n\r\n(The imported package works fine; the problem is just that the linter is emitting a warning. I'd prefer that the linter not show this false-positive warning, to improve the linter's signal-to-noise ratio.)\r\n\r\nI'm able to reproduce this using:\r\n\r\n- Conan 1.1.1 on my local macOS 10.13.3 system\r\n- Conan 1.1.1 on Travis CI's Mac OS 10.10.5 image\r\n- Conan 1.1.1 on Travis CI's Ubuntu 14.04.5 image\r\n- Conan 1.2.0 on CentOS 7.4\r\n\n", "before_files": [{"content": "import json\nimport os\nimport sys\n\nimport platform\n\nfrom conans.client.output import Color\nfrom conans.errors import ConanException\nfrom subprocess import PIPE, Popen\nfrom conans import __path__ as root_path\n\n\ndef conan_linter(conanfile_path, out):\n if getattr(sys, 'frozen', False):\n out.info(\"No linter available. Use a pip installed conan for recipe linting\")\n return\n apply_lint = os.environ.get(\"CONAN_RECIPE_LINTER\", True)\n if not apply_lint or apply_lint == \"False\":\n return\n\n dir_path = os.path.dirname(root_path[0]).replace(\"\\\\\", \"/\")\n dirname = os.path.dirname(conanfile_path).replace(\"\\\\\", \"/\")\n hook = '--init-hook=\"import sys;sys.path.extend([\\'%s\\', \\'%s\\'])\"' % (dirname, dir_path)\n\n try:\n py3_msgs = None\n msgs, py3_msgs = _normal_linter(conanfile_path, hook)\n except Exception as e:\n out.warn(\"Failed pylint: %s\" % e)\n else:\n if py3_msgs:\n out.writeln(\"Python 3 incompatibilities\\n ERROR: %s\"\n % \"\\n ERROR: \".join(py3_msgs),\n front=Color.BRIGHT_MAGENTA)\n if msgs:\n out.writeln(\"Linter warnings\\n WARN: %s\" % \"\\n WARN: \".join(msgs),\n front=Color.MAGENTA)\n pylint_werr = os.environ.get(\"CONAN_PYLINT_WERR\", None)\n if pylint_werr and (py3_msgs or msgs):\n raise ConanException(\"Package recipe has linter errors. 
Please fix them.\")\n\n\ndef _runner(args):\n command = [\"pylint\", \"--output-format=json\"] + args\n command = \" \".join(command)\n shell = True if platform.system() != \"Windows\" else False\n proc = Popen(command, shell=shell, bufsize=10, stdout=PIPE, stderr=PIPE)\n stdout, _ = proc.communicate()\n return json.loads(stdout.decode(\"utf-8\")) if stdout else {}\n\n\ndef _normal_linter(conanfile_path, hook):\n args = ['--py3k', \"--enable=all\", \"--reports=no\", \"--disable=no-absolute-import\", \"--persistent=no\",\n hook, '\"%s\"' % conanfile_path]\n pylintrc = os.environ.get(\"CONAN_PYLINTRC\", None)\n if pylintrc:\n if not os.path.exists(pylintrc):\n raise ConanException(\"File %s defined by PYLINTRC doesn't exist\" % pylintrc)\n args.append('--rcfile=\"%s\"' % pylintrc)\n\n output_json = _runner(args)\n dynamic_fields = (\"source_folder\", \"build_folder\", \"package_folder\", \"info_build\",\n \"build_requires\", \"info\")\n\n def _accept_message(msg):\n symbol = msg.get(\"symbol\")\n text = msg.get(\"message\")\n\n if symbol == \"no-member\":\n for field in dynamic_fields:\n if field in text:\n return False\n if symbol == \"not-callable\" and \"self.copy is not callable\" == text:\n return False\n if symbol == \"not-callable\" and \"self.copy_deps is not callable\" == text:\n return False\n if symbol in (\"bare-except\", \"broad-except\"): # No exception type(s) specified\n return False\n\n return True\n\n result = []\n py3msgs = []\n for msg in output_json:\n if msg.get(\"type\") in (\"warning\", \"error\"):\n message_id = msg.get(\"symbol\")\n if message_id in (\"print-statement\", \"dict-iter-method\"):\n py3msgs.append(\"Py3 incompatibility. Line %s: %s\"\n % (msg.get(\"line\"), msg.get(\"message\")))\n elif _accept_message(msg):\n result.append(\"Linter. Line %s: %s\" % (msg.get(\"line\"), msg.get(\"message\")))\n\n return result, py3msgs\n", "path": "conans/client/cmd/export_linter.py"}]} | 2,061 | 126 |
gh_patches_debug_37404 | rasdani/github-patches | git_diff | svthalia__concrexit-1135 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix "identical-code" issue in website/education/admin.py
Identical blocks of code found in 2 locations. Consider refactoring.
https://codeclimate.com/github/svthalia/concrexit/website/education/admin.py#issue_5ecfbb80e8d0fa00010003f6
</issue>
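The two locations Code Climate is pointing at are the identical `download_csv` admin actions on `ExamAdmin` and `SummaryAdmin`. A common way to resolve this kind of finding is to hoist the shared action into a small mixin that both admin classes inherit from; the sketch below shows that shape (it reuses the module's existing imports, namely `csv`, `HttpResponse` and the gettext `_`, and the class names are illustrative).

    # Sketch: extract the duplicated admin action into a reusable mixin.
    class WithDownloadCsv:
        def download_csv(self, request, queryset):
            opts = queryset.model._meta
            response = HttpResponse(content_type="text/csv")
            response["Content-Disposition"] = "attachment;filename=export.csv"
            writer = csv.writer(response)
            field_names = [field.name for field in opts.fields]
            writer.writerow(field_names)  # header row
            for obj in queryset:
                writer.writerow([getattr(obj, field) for field in field_names])
            return response

        download_csv.short_description = _("Download marked as csv")


    class ExamAdmin(TranslatedModelAdmin, WithDownloadCsv):
        ...  # unchanged, minus its own download_csv definition


    class SummaryAdmin(TranslatedModelAdmin, WithDownloadCsv):
        ...  # unchanged, minus its own download_csv definition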
<code>
[start of website/education/admin.py]
1 """
2 This module registers admin pages for the models
3 """
4 import csv
5
6 from django.contrib import admin
7 from django.http import HttpResponse
8 from django.utils.translation import gettext_lazy as _
9
10 from utils.translation import TranslatedModelAdmin
11 from . import models
12 from .forms import SummaryAdminForm
13
14 admin.site.register(models.Category)
15
16
17 @admin.register(models.Course)
18 class CourseAdmin(TranslatedModelAdmin):
19 fields = (
20 "name",
21 "course_code",
22 "ec",
23 "since",
24 "until",
25 "categories",
26 "old_courses",
27 )
28 list_filter = ("categories", "ec")
29 search_fields = ("name", "course_code")
30
31
32 @admin.register(models.Exam)
33 class ExamAdmin(TranslatedModelAdmin):
34 list_display = (
35 "type",
36 "course",
37 "exam_date",
38 "uploader",
39 "accepted",
40 "language",
41 "download_count",
42 )
43 readonly_fields = ("download_count",)
44 list_filter = ("accepted", "exam_date", "type", "language")
45 search_fields = (
46 "name",
47 "uploader__first_name",
48 "uploader__last_name",
49 "course__name_nl",
50 "course__name_en",
51 )
52 actions = ["accept", "reject", "reset_download_count", "download_csv"]
53
54 def accept(self, request, queryset):
55 queryset.update(accepted=True)
56
57 accept.short_description = _("Mark exams as accepted")
58
59 def reject(self, request, queryset):
60 queryset.update(accepted=False)
61
62 reject.short_description = _("Mark exams as rejected")
63
64 def reset_download_count(self, request, queryset):
65 queryset.update(download_count=0)
66
67 reset_download_count.short_description = _("Reset the marked exams download count")
68
69 def download_csv(self, request, queryset):
70 opts = queryset.model._meta
71 response = HttpResponse(content_type="text/csv")
72 # force download.
73 response["Content-Disposition"] = "attachment;filename=export.csv"
74 # the csv writer
75 writer = csv.writer(response)
76 field_names = [field.name for field in opts.fields]
77 # Write a first row with header information
78 writer.writerow(field_names)
79 # Write data rows
80 for obj in queryset:
81 writer.writerow([getattr(obj, field) for field in field_names])
82 return response
83
84 download_csv.short_description = _("Download marked as csv")
85
86
87 @admin.register(models.Summary)
88 class SummaryAdmin(TranslatedModelAdmin):
89 list_display = (
90 "name",
91 "course",
92 "uploader",
93 "accepted",
94 "language",
95 "download_count",
96 )
97 readonly_fields = ("download_count",)
98 list_filter = ("accepted", "language")
99 search_fields = (
100 "name",
101 "uploader__first_name",
102 "uploader__last_name",
103 "course__name_nl",
104 "course__name_en",
105 )
106 actions = ["accept", "reject", "reset_download_count", "download_csv"]
107 form = SummaryAdminForm
108
109 def accept(self, request, queryset):
110 queryset.update(accepted=True)
111
112 accept.short_description = _("Mark summaries as accepted")
113
114 def reject(self, request, queryset):
115 queryset.update(accepted=False)
116
117 reject.short_description = _("Mark summaries as rejected")
118
119 def reset_download_count(self, request, queryset):
120 queryset.update(download_count=0)
121
122 reset_download_count.short_description = _(
123 "Reset the marked summaries download count"
124 )
125
126 def download_csv(self, request, queryset):
127 opts = queryset.model._meta
128 response = HttpResponse(content_type="text/csv")
129 # force download.
130 response["Content-Disposition"] = "attachment;filename=export.csv"
131 # the csv writer
132 writer = csv.writer(response)
133 field_names = [field.name for field in opts.fields]
134 # Write a first row with header information
135 writer.writerow(field_names)
136 # Write data rows
137 for obj in queryset:
138 writer.writerow([getattr(obj, field) for field in field_names])
139 return response
140
141 download_csv.short_description = _("Download marked as csv")
142
[end of website/education/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/education/admin.py b/website/education/admin.py
--- a/website/education/admin.py
+++ b/website/education/admin.py
@@ -29,8 +29,27 @@
search_fields = ("name", "course_code")
+class WithDownloadCsv:
+ def download_csv(self, request, queryset):
+ opts = queryset.model._meta
+ response = HttpResponse(content_type="text/csv")
+ # force download.
+ response["Content-Disposition"] = "attachment;filename=export.csv"
+ # the csv writer
+ writer = csv.writer(response)
+ field_names = [field.name for field in opts.fields]
+ # Write a first row with header information
+ writer.writerow(field_names)
+ # Write data rows
+ for obj in queryset:
+ writer.writerow([getattr(obj, field) for field in field_names])
+ return response
+
+ download_csv.short_description = _("Download marked as csv")
+
+
@admin.register(models.Exam)
-class ExamAdmin(TranslatedModelAdmin):
+class ExamAdmin(TranslatedModelAdmin, WithDownloadCsv):
list_display = (
"type",
"course",
@@ -66,26 +85,9 @@
reset_download_count.short_description = _("Reset the marked exams download count")
- def download_csv(self, request, queryset):
- opts = queryset.model._meta
- response = HttpResponse(content_type="text/csv")
- # force download.
- response["Content-Disposition"] = "attachment;filename=export.csv"
- # the csv writer
- writer = csv.writer(response)
- field_names = [field.name for field in opts.fields]
- # Write a first row with header information
- writer.writerow(field_names)
- # Write data rows
- for obj in queryset:
- writer.writerow([getattr(obj, field) for field in field_names])
- return response
-
- download_csv.short_description = _("Download marked as csv")
-
@admin.register(models.Summary)
-class SummaryAdmin(TranslatedModelAdmin):
+class SummaryAdmin(TranslatedModelAdmin, WithDownloadCsv):
list_display = (
"name",
"course",
@@ -122,20 +124,3 @@
reset_download_count.short_description = _(
"Reset the marked summaries download count"
)
-
- def download_csv(self, request, queryset):
- opts = queryset.model._meta
- response = HttpResponse(content_type="text/csv")
- # force download.
- response["Content-Disposition"] = "attachment;filename=export.csv"
- # the csv writer
- writer = csv.writer(response)
- field_names = [field.name for field in opts.fields]
- # Write a first row with header information
- writer.writerow(field_names)
- # Write data rows
- for obj in queryset:
- writer.writerow([getattr(obj, field) for field in field_names])
- return response
-
- download_csv.short_description = _("Download marked as csv")
| {"golden_diff": "diff --git a/website/education/admin.py b/website/education/admin.py\n--- a/website/education/admin.py\n+++ b/website/education/admin.py\n@@ -29,8 +29,27 @@\n search_fields = (\"name\", \"course_code\")\n \n \n+class WithDownloadCsv:\n+ def download_csv(self, request, queryset):\n+ opts = queryset.model._meta\n+ response = HttpResponse(content_type=\"text/csv\")\n+ # force download.\n+ response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n+ # the csv writer\n+ writer = csv.writer(response)\n+ field_names = [field.name for field in opts.fields]\n+ # Write a first row with header information\n+ writer.writerow(field_names)\n+ # Write data rows\n+ for obj in queryset:\n+ writer.writerow([getattr(obj, field) for field in field_names])\n+ return response\n+\n+ download_csv.short_description = _(\"Download marked as csv\")\n+\n+\n @admin.register(models.Exam)\n-class ExamAdmin(TranslatedModelAdmin):\n+class ExamAdmin(TranslatedModelAdmin, WithDownloadCsv):\n list_display = (\n \"type\",\n \"course\",\n@@ -66,26 +85,9 @@\n \n reset_download_count.short_description = _(\"Reset the marked exams download count\")\n \n- def download_csv(self, request, queryset):\n- opts = queryset.model._meta\n- response = HttpResponse(content_type=\"text/csv\")\n- # force download.\n- response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n- # the csv writer\n- writer = csv.writer(response)\n- field_names = [field.name for field in opts.fields]\n- # Write a first row with header information\n- writer.writerow(field_names)\n- # Write data rows\n- for obj in queryset:\n- writer.writerow([getattr(obj, field) for field in field_names])\n- return response\n-\n- download_csv.short_description = _(\"Download marked as csv\")\n-\n \n @admin.register(models.Summary)\n-class SummaryAdmin(TranslatedModelAdmin):\n+class SummaryAdmin(TranslatedModelAdmin, WithDownloadCsv):\n list_display = (\n \"name\",\n \"course\",\n@@ -122,20 +124,3 @@\n reset_download_count.short_description = _(\n \"Reset the marked summaries download count\"\n )\n-\n- def download_csv(self, request, queryset):\n- opts = queryset.model._meta\n- response = HttpResponse(content_type=\"text/csv\")\n- # force download.\n- response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n- # the csv writer\n- writer = csv.writer(response)\n- field_names = [field.name for field in opts.fields]\n- # Write a first row with header information\n- writer.writerow(field_names)\n- # Write data rows\n- for obj in queryset:\n- writer.writerow([getattr(obj, field) for field in field_names])\n- return response\n-\n- download_csv.short_description = _(\"Download marked as csv\")\n", "issue": "Fix \"identical-code\" issue in website/education/admin.py\nIdentical blocks of code found in 2 locations. Consider refactoring.\n\nhttps://codeclimate.com/github/svthalia/concrexit/website/education/admin.py#issue_5ecfbb80e8d0fa00010003f6\n", "before_files": [{"content": "\"\"\"\nThis module registers admin pages for the models\n\"\"\"\nimport csv\n\nfrom django.contrib import admin\nfrom django.http import HttpResponse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom utils.translation import TranslatedModelAdmin\nfrom . 
import models\nfrom .forms import SummaryAdminForm\n\nadmin.site.register(models.Category)\n\n\[email protected](models.Course)\nclass CourseAdmin(TranslatedModelAdmin):\n fields = (\n \"name\",\n \"course_code\",\n \"ec\",\n \"since\",\n \"until\",\n \"categories\",\n \"old_courses\",\n )\n list_filter = (\"categories\", \"ec\")\n search_fields = (\"name\", \"course_code\")\n\n\[email protected](models.Exam)\nclass ExamAdmin(TranslatedModelAdmin):\n list_display = (\n \"type\",\n \"course\",\n \"exam_date\",\n \"uploader\",\n \"accepted\",\n \"language\",\n \"download_count\",\n )\n readonly_fields = (\"download_count\",)\n list_filter = (\"accepted\", \"exam_date\", \"type\", \"language\")\n search_fields = (\n \"name\",\n \"uploader__first_name\",\n \"uploader__last_name\",\n \"course__name_nl\",\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n\n def accept(self, request, queryset):\n queryset.update(accepted=True)\n\n accept.short_description = _(\"Mark exams as accepted\")\n\n def reject(self, request, queryset):\n queryset.update(accepted=False)\n\n reject.short_description = _(\"Mark exams as rejected\")\n\n def reset_download_count(self, request, queryset):\n queryset.update(download_count=0)\n\n reset_download_count.short_description = _(\"Reset the marked exams download count\")\n\n def download_csv(self, request, queryset):\n opts = queryset.model._meta\n response = HttpResponse(content_type=\"text/csv\")\n # force download.\n response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n # the csv writer\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response\n\n download_csv.short_description = _(\"Download marked as csv\")\n\n\[email protected](models.Summary)\nclass SummaryAdmin(TranslatedModelAdmin):\n list_display = (\n \"name\",\n \"course\",\n \"uploader\",\n \"accepted\",\n \"language\",\n \"download_count\",\n )\n readonly_fields = (\"download_count\",)\n list_filter = (\"accepted\", \"language\")\n search_fields = (\n \"name\",\n \"uploader__first_name\",\n \"uploader__last_name\",\n \"course__name_nl\",\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n form = SummaryAdminForm\n\n def accept(self, request, queryset):\n queryset.update(accepted=True)\n\n accept.short_description = _(\"Mark summaries as accepted\")\n\n def reject(self, request, queryset):\n queryset.update(accepted=False)\n\n reject.short_description = _(\"Mark summaries as rejected\")\n\n def reset_download_count(self, request, queryset):\n queryset.update(download_count=0)\n\n reset_download_count.short_description = _(\n \"Reset the marked summaries download count\"\n )\n\n def download_csv(self, request, queryset):\n opts = queryset.model._meta\n response = HttpResponse(content_type=\"text/csv\")\n # force download.\n response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n # the csv writer\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response\n\n download_csv.short_description = _(\"Download 
marked as csv\")\n", "path": "website/education/admin.py"}]} | 1,796 | 665 |
gh_patches_debug_5654 | rasdani/github-patches | git_diff | Nitrate__Nitrate-360 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ensure to work with Python 3.7
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 import sys
4
5 from setuptools import setup, find_packages
6
7
8 with open('VERSION.txt', 'r') as f:
9 pkg_version = f.read().strip()
10
11
12 def get_long_description():
13 with open('README.rst', 'r') as f:
14 return f.read()
15
16
17 install_requires = [
18 'PyMySQL == 0.7.11',
19 'beautifulsoup4 >= 4.1.1',
20 'django >= 1.11,<2.0',
21 'django-contrib-comments == 1.8.0',
22 'django-tinymce == 2.7.0',
23 'django-uuslug == 1.1.8',
24 'html2text',
25 'kobo == 0.7.0',
26 'odfpy >= 0.9.6',
27 'python-bugzilla',
28 'six',
29 'xmltodict',
30 ]
31
32 if sys.version_info.major < 3:
33 install_requires += [
34 'enum34',
35 ]
36
37 extras_require = {
38 # Required for tcms.core.contrib.auth.backends.KerberosBackend
39 'krbauth': [
40 'kerberos == 1.2.5'
41 ],
42
43 # Packages for building documentation
44 'docs': [
45 'Sphinx >= 1.1.2',
46 'sphinx_rtd_theme',
47 ],
48
49 # Necessary packages for running tests
50 'tests': [
51 'coverage',
52 'factory_boy',
53 'flake8',
54 'mock',
55 'pytest',
56 'pytest-cov',
57 'pytest-django',
58 ],
59
60 # Contain tools that assists the development
61 'devtools': [
62 'django-debug-toolbar == 1.7',
63 'tox',
64 'django-extensions',
65 'pygraphviz',
66 'future-breakpoint',
67 ],
68
69 # Required packages required to run async tasks
70 'async': [
71 'celery == 4.1.0',
72 ]
73 }
74
75
76 setup(
77 name='Nitrate',
78 version=pkg_version,
79 description='Test Case Management System',
80 long_description=get_long_description(),
81 author='Nitrate Team',
82 maintainer='Chenxiong Qi',
83 maintainer_email='[email protected]',
84 url='https://github.com/Nitrate/Nitrate/',
85 license='GPLv2+',
86 keywords='test case',
87 install_requires=install_requires,
88 extras_require=extras_require,
89 packages=find_packages(),
90 include_package_data=True,
91 classifiers=[
92 'Framework :: Django',
93 'Framework :: Django :: 1.11',
94 'Intended Audience :: Developers',
95 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
96 'Programming Language :: Python :: 2',
97 'Programming Language :: Python :: 2.7',
98 'Programming Language :: Python :: 3',
99 'Programming Language :: Python :: 3.6',
100 'Topic :: Software Development :: Quality Assurance',
101 'Topic :: Software Development :: Testing',
102 ],
103 )
104
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -97,6 +97,7 @@
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -97,6 +97,7 @@\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n+ 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n", "issue": "Ensure to work with Python 3.7\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<2.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.1.0',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]} | 1,412 | 100 |
gh_patches_debug_14821 | rasdani/github-patches | git_diff | SciTools__cartopy-1989 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Nightshade w/ refraction=-6.0 yields ValueError: GEOSGeom_createLinearRing_r returned a NULL pointer
### Description
I'm trying use Nightshade to draw twilight lines (and sub solar point).
If I do Nightshade w/ refraction=-6.0, I get ValueError: GEOSGeom_createLinearRing_r returned a NULL pointer
Nightshade w/ refraction=-6.0001 works OK
Also, refraction=89 causes extraneous lines.
#### Code to reproduce
```
import matplotlib.pyplot as plt
fig = plt.figure()
import cartopy
ax = fig.add_subplot(projection=cartopy.crs.PlateCarree())
ax.coastlines()
import datetime
date = datetime.datetime(1999, 12, 31, 12)
from cartopy.feature.nightshade import Nightshade
ax.add_feature(Nightshade(date, color='none'))
ax.add_feature(Nightshade(date, refraction=-6.0, color='none')) # Civil Twilight
#ax.add_feature(Nightshade(date, refraction=-6.0001, color='none')) # Civil Twilight
ax.add_feature(Nightshade(date, refraction=-12, color='none')) # Nautical Twilight
ax.add_feature(Nightshade(date, refraction=89, color='none')) # sub solar circle ...
plt.show()
```
#### Traceback
```
python /tmp/nightshade-bug.py
/usr/lib64/python3.10/site-packages/cartopy/feature/nightshade.py:80: RuntimeWarning: invalid value encountered in arccos
omega0 = np.rad2deg(np.arccos(np.sin(np.deg2rad(refraction)) /
IllegalArgumentException: Points of LinearRing do not form a closed linestring
Traceback (most recent call last):
File "/tmp/nightshade-bug.py", line 15, in <module>
ax.add_feature(Nightshade(date, refraction=-6, color='none')) # Civil Twilight
File "/usr/lib64/python3.10/site-packages/cartopy/feature/nightshade.py", line 93, in __init__
geom = sgeom.Polygon(np.column_stack((x, y)))
File "/usr/lib64/python3.10/site-packages/shapely/geometry/polygon.py", line 243, in __init__
ret = geos_polygon_from_py(shell, holes)
File "/usr/lib64/python3.10/site-packages/shapely/geometry/polygon.py", line 509, in geos_polygon_from_py
ret = geos_linearring_from_py(shell)
File "shapely/speedups/_speedups.pyx", line 408, in shapely.speedups._speedups.geos_linearring_from_py
ValueError: GEOSGeom_createLinearRing_r returned a NULL pointer
```
<details>
<summary>Full environment definition</summary>
<!-- fill in the following information as appropriate -->
### Operating system
Fedora 35
### Cartopy version
python3-cartopy-0.20.1-1.fc35.x86_64
python3-shapely-1.7.1-12.fc35.x86_64
python3-matplotlib-3.5.1-3.fc35.x86_64
</details>
Thanks !
</issue>
<code>
[start of lib/cartopy/feature/nightshade.py]
1 # Copyright Cartopy Contributors
2 #
3 # This file is part of Cartopy and is released under the LGPL license.
4 # See COPYING and COPYING.LESSER in the root of the repository for full
5 # licensing details.
6
7 import datetime
8
9 import numpy as np
10 import shapely.geometry as sgeom
11
12 from . import ShapelyFeature
13 from .. import crs as ccrs
14
15
16 class Nightshade(ShapelyFeature):
17 def __init__(self, date=None, delta=0.1, refraction=-0.83,
18 color="k", alpha=0.5, **kwargs):
19 """
20 Shade the darkside of the Earth, accounting for refraction.
21
22 Parameters
23 ----------
24 date : datetime
25 A UTC datetime object used to calculate the position of the sun.
26 Default: datetime.datetime.utcnow()
27 delta : float
28 Stepsize in degrees to determine the resolution of the
29 night polygon feature (``npts = 180 / delta``).
30 refraction : float
31 The adjustment in degrees due to refraction,
32 thickness of the solar disc, elevation etc...
33
34 Note
35 ----
36 Matplotlib keyword arguments can be used when drawing the feature.
37 This allows standard Matplotlib control over aspects such as
38 'color', 'alpha', etc.
39
40 """
41 if date is None:
42 date = datetime.datetime.utcnow()
43
44 # make sure date is UTC, or naive with respect to time zones
45 if date.utcoffset():
46 raise ValueError(
47 f'datetime instance must be UTC, not {date.tzname()}')
48
49 # Returns the Greenwich hour angle,
50 # need longitude (opposite direction)
51 lat, lon = _solar_position(date)
52 pole_lon = lon
53 if lat > 0:
54 pole_lat = -90 + lat
55 central_lon = 180
56 else:
57 pole_lat = 90 + lat
58 central_lon = 0
59
60 rotated_pole = ccrs.RotatedPole(pole_latitude=pole_lat,
61 pole_longitude=pole_lon,
62 central_rotated_longitude=central_lon)
63
64 npts = int(180/delta)
65 x = np.empty(npts*2)
66 y = np.empty(npts*2)
67
68 # Solve the equation for sunrise/sunset:
69 # https://en.wikipedia.org/wiki/Sunrise_equation#Generalized_equation
70 # NOTE: In the generalized equation on Wikipedia,
71 # delta == 0. in the rotated pole coordinate system.
72 # Therefore, the max/min latitude is +/- (90+refraction)
73
74 # Fill latitudes up and then down
75 y[:npts] = np.linspace(-(90+refraction), 90+refraction, npts)
76 y[npts:] = y[:npts][::-1]
77
78 # Solve the generalized equation for omega0, which is the
79 # angle of sunrise/sunset from solar noon
80 omega0 = np.rad2deg(np.arccos(np.sin(np.deg2rad(refraction)) /
81 np.cos(np.deg2rad(y))))
82
83 # Fill the longitude values from the offset for midnight.
84 # This needs to be a closed loop to fill the polygon.
85 # Negative longitudes
86 x[:npts] = -(180 - omega0[:npts])
87 # Positive longitudes
88 x[npts:] = 180 - omega0[npts:]
89
90 kwargs.setdefault('facecolor', color)
91 kwargs.setdefault('alpha', alpha)
92
93 geom = sgeom.Polygon(np.column_stack((x, y)))
94 return super().__init__(
95 [geom], rotated_pole, **kwargs)
96
97
98 def _julian_day(date):
99 """
100 Calculate the Julian day from an input datetime.
101
102 Parameters
103 ----------
104 date
105 A UTC datetime object.
106
107 Note
108 ----
109 Algorithm implemented following equations from Chapter 3 (Algorithm 14):
110 Vallado, David 'Fundamentals of Astrodynamics and Applications', (2007)
111
112 Julian day epoch is: noon on January 1, 4713 BC (proleptic Julian)
113 noon on November 24, 4714 BC (proleptic Gregorian)
114
115 """
116 year = date.year
117 month = date.month
118 day = date.day
119 hour = date.hour
120 minute = date.minute
121 second = date.second
122
123 # January/February correspond to months 13/14 respectively
124 # for the constants to work out properly
125 if month < 3:
126 month += 12
127 year -= 1
128
129 B = 2 - year // 100 + (year // 100) // 4
130 C = ((second/60 + minute)/60 + hour)/24
131
132 JD = (int(365.25*(year + 4716)) + int(30.6001*(month+1)) +
133 day + B - 1524.5 + C)
134 return JD
135
136
137 def _solar_position(date):
138 """
139 Calculate the latitude and longitude point where the sun is
140 directly overhead for the given date.
141
142 Parameters
143 ----------
144 date
145 A UTC datetime object.
146
147 Returns
148 -------
149 (latitude, longitude) in degrees
150
151 Note
152 ----
153 Algorithm implemented following equations from Chapter 5 (Algorithm 29):
154 Vallado, David 'Fundamentals of Astrodynamics and Applications', (2007)
155
156 """
157 # NOTE: Constants are in degrees in the textbook,
158 # so we need to convert the values from deg2rad when taking sin/cos
159
160 # Centuries from J2000
161 T_UT1 = (_julian_day(date) - 2451545.0)/36525
162
163 # solar longitude (deg)
164 lambda_M_sun = (280.460 + 36000.771*T_UT1) % 360
165
166 # solar anomaly (deg)
167 M_sun = (357.5277233 + 35999.05034*T_UT1) % 360
168
169 # ecliptic longitude
170 lambda_ecliptic = (lambda_M_sun + 1.914666471*np.sin(np.deg2rad(M_sun)) +
171 0.019994643*np.sin(np.deg2rad(2*M_sun)))
172
173 # obliquity of the ecliptic (epsilon in Vallado's notation)
174 epsilon = 23.439291 - 0.0130042*T_UT1
175
176 # declination of the sun
177 delta_sun = np.rad2deg(np.arcsin(np.sin(np.deg2rad(epsilon)) *
178 np.sin(np.deg2rad(lambda_ecliptic))))
179
180 # Greenwich mean sidereal time (seconds)
181 theta_GMST = (67310.54841 +
182 (876600*3600 + 8640184.812866)*T_UT1 +
183 0.093104*T_UT1**2 -
184 6.2e-6*T_UT1**3)
185 # Convert to degrees
186 theta_GMST = (theta_GMST % 86400)/240
187
188 # Right ascension calculations
189 numerator = (np.cos(np.deg2rad(epsilon)) *
190 np.sin(np.deg2rad(lambda_ecliptic)) /
191 np.cos(np.deg2rad(delta_sun)))
192 denominator = (np.cos(np.deg2rad(lambda_ecliptic)) /
193 np.cos(np.deg2rad(delta_sun)))
194
195 alpha_sun = np.rad2deg(np.arctan2(numerator, denominator))
196
197 # longitude is opposite of Greenwich Hour Angle (GHA)
198 # GHA == theta_GMST - alpha_sun
199 lon = -(theta_GMST-alpha_sun)
200 if lon < -180:
201 lon += 360
202
203 return (delta_sun, lon)
204
[end of lib/cartopy/feature/nightshade.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/cartopy/feature/nightshade.py b/lib/cartopy/feature/nightshade.py
--- a/lib/cartopy/feature/nightshade.py
+++ b/lib/cartopy/feature/nightshade.py
@@ -77,8 +77,12 @@
# Solve the generalized equation for omega0, which is the
# angle of sunrise/sunset from solar noon
- omega0 = np.rad2deg(np.arccos(np.sin(np.deg2rad(refraction)) /
- np.cos(np.deg2rad(y))))
+ # We need to clip the input to arccos to [-1, 1] due to floating
+ # point precision and arccos creating nans for values outside
+ # of the domain
+ arccos_tmp = np.clip(np.sin(np.deg2rad(refraction)) /
+ np.cos(np.deg2rad(y)), -1, 1)
+ omega0 = np.rad2deg(np.arccos(arccos_tmp))
# Fill the longitude values from the offset for midnight.
# This needs to be a closed loop to fill the polygon.
| {"golden_diff": "diff --git a/lib/cartopy/feature/nightshade.py b/lib/cartopy/feature/nightshade.py\n--- a/lib/cartopy/feature/nightshade.py\n+++ b/lib/cartopy/feature/nightshade.py\n@@ -77,8 +77,12 @@\n \n # Solve the generalized equation for omega0, which is the\n # angle of sunrise/sunset from solar noon\n- omega0 = np.rad2deg(np.arccos(np.sin(np.deg2rad(refraction)) /\n- np.cos(np.deg2rad(y))))\n+ # We need to clip the input to arccos to [-1, 1] due to floating\n+ # point precision and arccos creating nans for values outside\n+ # of the domain\n+ arccos_tmp = np.clip(np.sin(np.deg2rad(refraction)) /\n+ np.cos(np.deg2rad(y)), -1, 1)\n+ omega0 = np.rad2deg(np.arccos(arccos_tmp))\n \n # Fill the longitude values from the offset for midnight.\n # This needs to be a closed loop to fill the polygon.\n", "issue": "Nightshade w/ refraction=-6.0 yields ValueError: GEOSGeom_createLinearRing_r returned a NULL pointer\n### Description\r\n\r\nI'm trying use Nightshade to draw twilight lines (and sub solar point).\r\n\r\nIf I do Nightshade w/ refraction=-6.0, I get ValueError: GEOSGeom_createLinearRing_r returned a NULL pointer\r\n\r\nNightshade w/ refraction=-6.0001 works OK\r\n\r\nAlso, refraction=89 causes extraneous lines.\r\n\r\n#### Code to reproduce\r\n\r\n```\r\nimport matplotlib.pyplot as plt\r\nfig = plt.figure()\r\n\r\nimport cartopy\r\nax = fig.add_subplot(projection=cartopy.crs.PlateCarree())\r\nax.coastlines()\r\n\r\nimport datetime\r\ndate = datetime.datetime(1999, 12, 31, 12)\r\n\r\nfrom cartopy.feature.nightshade import Nightshade\r\n\r\nax.add_feature(Nightshade(date, color='none'))\r\nax.add_feature(Nightshade(date, refraction=-6.0, color='none')) # Civil Twilight\r\n#ax.add_feature(Nightshade(date, refraction=-6.0001, color='none')) # Civil Twilight\r\nax.add_feature(Nightshade(date, refraction=-12, color='none')) # Nautical Twilight\r\n\r\nax.add_feature(Nightshade(date, refraction=89, color='none')) # sub solar circle ...\r\n\r\nplt.show()\r\n```\r\n\r\n#### Traceback \r\n\r\n```\r\npython /tmp/nightshade-bug.py \r\n/usr/lib64/python3.10/site-packages/cartopy/feature/nightshade.py:80: RuntimeWarning: invalid value encountered in arccos\r\n omega0 = np.rad2deg(np.arccos(np.sin(np.deg2rad(refraction)) /\r\nIllegalArgumentException: Points of LinearRing do not form a closed linestring\r\nTraceback (most recent call last):\r\n File \"/tmp/nightshade-bug.py\", line 15, in <module>\r\n ax.add_feature(Nightshade(date, refraction=-6, color='none')) # Civil Twilight\r\n File \"/usr/lib64/python3.10/site-packages/cartopy/feature/nightshade.py\", line 93, in __init__\r\n geom = sgeom.Polygon(np.column_stack((x, y)))\r\n File \"/usr/lib64/python3.10/site-packages/shapely/geometry/polygon.py\", line 243, in __init__\r\n ret = geos_polygon_from_py(shell, holes)\r\n File \"/usr/lib64/python3.10/site-packages/shapely/geometry/polygon.py\", line 509, in geos_polygon_from_py\r\n ret = geos_linearring_from_py(shell)\r\n File \"shapely/speedups/_speedups.pyx\", line 408, in shapely.speedups._speedups.geos_linearring_from_py\r\nValueError: GEOSGeom_createLinearRing_r returned a NULL pointer\r\n```\r\n\r\n<details>\r\n <summary>Full environment definition</summary>\r\n\r\n<!-- fill in the following information as appropriate -->\r\n\r\n### Operating system\r\n\r\nFedora 35\r\n\r\n### Cartopy version\r\n\r\npython3-cartopy-0.20.1-1.fc35.x86_64\r\n\r\npython3-shapely-1.7.1-12.fc35.x86_64\r\npython3-matplotlib-3.5.1-3.fc35.x86_64\r\n\r\n</details>\r\n\r\nThanks !\n", "before_files": [{"content": "# 
Copyright Cartopy Contributors\n#\n# This file is part of Cartopy and is released under the LGPL license.\n# See COPYING and COPYING.LESSER in the root of the repository for full\n# licensing details.\n\nimport datetime\n\nimport numpy as np\nimport shapely.geometry as sgeom\n\nfrom . import ShapelyFeature\nfrom .. import crs as ccrs\n\n\nclass Nightshade(ShapelyFeature):\n def __init__(self, date=None, delta=0.1, refraction=-0.83,\n color=\"k\", alpha=0.5, **kwargs):\n \"\"\"\n Shade the darkside of the Earth, accounting for refraction.\n\n Parameters\n ----------\n date : datetime\n A UTC datetime object used to calculate the position of the sun.\n Default: datetime.datetime.utcnow()\n delta : float\n Stepsize in degrees to determine the resolution of the\n night polygon feature (``npts = 180 / delta``).\n refraction : float\n The adjustment in degrees due to refraction,\n thickness of the solar disc, elevation etc...\n\n Note\n ----\n Matplotlib keyword arguments can be used when drawing the feature.\n This allows standard Matplotlib control over aspects such as\n 'color', 'alpha', etc.\n\n \"\"\"\n if date is None:\n date = datetime.datetime.utcnow()\n\n # make sure date is UTC, or naive with respect to time zones\n if date.utcoffset():\n raise ValueError(\n f'datetime instance must be UTC, not {date.tzname()}')\n\n # Returns the Greenwich hour angle,\n # need longitude (opposite direction)\n lat, lon = _solar_position(date)\n pole_lon = lon\n if lat > 0:\n pole_lat = -90 + lat\n central_lon = 180\n else:\n pole_lat = 90 + lat\n central_lon = 0\n\n rotated_pole = ccrs.RotatedPole(pole_latitude=pole_lat,\n pole_longitude=pole_lon,\n central_rotated_longitude=central_lon)\n\n npts = int(180/delta)\n x = np.empty(npts*2)\n y = np.empty(npts*2)\n\n # Solve the equation for sunrise/sunset:\n # https://en.wikipedia.org/wiki/Sunrise_equation#Generalized_equation\n # NOTE: In the generalized equation on Wikipedia,\n # delta == 0. 
in the rotated pole coordinate system.\n # Therefore, the max/min latitude is +/- (90+refraction)\n\n # Fill latitudes up and then down\n y[:npts] = np.linspace(-(90+refraction), 90+refraction, npts)\n y[npts:] = y[:npts][::-1]\n\n # Solve the generalized equation for omega0, which is the\n # angle of sunrise/sunset from solar noon\n omega0 = np.rad2deg(np.arccos(np.sin(np.deg2rad(refraction)) /\n np.cos(np.deg2rad(y))))\n\n # Fill the longitude values from the offset for midnight.\n # This needs to be a closed loop to fill the polygon.\n # Negative longitudes\n x[:npts] = -(180 - omega0[:npts])\n # Positive longitudes\n x[npts:] = 180 - omega0[npts:]\n\n kwargs.setdefault('facecolor', color)\n kwargs.setdefault('alpha', alpha)\n\n geom = sgeom.Polygon(np.column_stack((x, y)))\n return super().__init__(\n [geom], rotated_pole, **kwargs)\n\n\ndef _julian_day(date):\n \"\"\"\n Calculate the Julian day from an input datetime.\n\n Parameters\n ----------\n date\n A UTC datetime object.\n\n Note\n ----\n Algorithm implemented following equations from Chapter 3 (Algorithm 14):\n Vallado, David 'Fundamentals of Astrodynamics and Applications', (2007)\n\n Julian day epoch is: noon on January 1, 4713 BC (proleptic Julian)\n noon on November 24, 4714 BC (proleptic Gregorian)\n\n \"\"\"\n year = date.year\n month = date.month\n day = date.day\n hour = date.hour\n minute = date.minute\n second = date.second\n\n # January/February correspond to months 13/14 respectively\n # for the constants to work out properly\n if month < 3:\n month += 12\n year -= 1\n\n B = 2 - year // 100 + (year // 100) // 4\n C = ((second/60 + minute)/60 + hour)/24\n\n JD = (int(365.25*(year + 4716)) + int(30.6001*(month+1)) +\n day + B - 1524.5 + C)\n return JD\n\n\ndef _solar_position(date):\n \"\"\"\n Calculate the latitude and longitude point where the sun is\n directly overhead for the given date.\n\n Parameters\n ----------\n date\n A UTC datetime object.\n\n Returns\n -------\n (latitude, longitude) in degrees\n\n Note\n ----\n Algorithm implemented following equations from Chapter 5 (Algorithm 29):\n Vallado, David 'Fundamentals of Astrodynamics and Applications', (2007)\n\n \"\"\"\n # NOTE: Constants are in degrees in the textbook,\n # so we need to convert the values from deg2rad when taking sin/cos\n\n # Centuries from J2000\n T_UT1 = (_julian_day(date) - 2451545.0)/36525\n\n # solar longitude (deg)\n lambda_M_sun = (280.460 + 36000.771*T_UT1) % 360\n\n # solar anomaly (deg)\n M_sun = (357.5277233 + 35999.05034*T_UT1) % 360\n\n # ecliptic longitude\n lambda_ecliptic = (lambda_M_sun + 1.914666471*np.sin(np.deg2rad(M_sun)) +\n 0.019994643*np.sin(np.deg2rad(2*M_sun)))\n\n # obliquity of the ecliptic (epsilon in Vallado's notation)\n epsilon = 23.439291 - 0.0130042*T_UT1\n\n # declination of the sun\n delta_sun = np.rad2deg(np.arcsin(np.sin(np.deg2rad(epsilon)) *\n np.sin(np.deg2rad(lambda_ecliptic))))\n\n # Greenwich mean sidereal time (seconds)\n theta_GMST = (67310.54841 +\n (876600*3600 + 8640184.812866)*T_UT1 +\n 0.093104*T_UT1**2 -\n 6.2e-6*T_UT1**3)\n # Convert to degrees\n theta_GMST = (theta_GMST % 86400)/240\n\n # Right ascension calculations\n numerator = (np.cos(np.deg2rad(epsilon)) *\n np.sin(np.deg2rad(lambda_ecliptic)) /\n np.cos(np.deg2rad(delta_sun)))\n denominator = (np.cos(np.deg2rad(lambda_ecliptic)) /\n np.cos(np.deg2rad(delta_sun)))\n\n alpha_sun = np.rad2deg(np.arctan2(numerator, denominator))\n\n # longitude is opposite of Greenwich Hour Angle (GHA)\n # GHA == theta_GMST - alpha_sun\n lon = 
-(theta_GMST-alpha_sun)\n if lon < -180:\n lon += 360\n\n return (delta_sun, lon)\n", "path": "lib/cartopy/feature/nightshade.py"}]} | 3,603 | 247 |
gh_patches_debug_37911 | rasdani/github-patches | git_diff | comic__grand-challenge.org-2466 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Notification and newsletter emails should only go to active users
At the moment these emails are sent to all users, deactivated or not.
</issue>
<code>
[start of app/grandchallenge/notifications/tasks.py]
1 from celery import shared_task
2 from django.core.paginator import Paginator
3 from django.utils.timezone import now
4
5 from grandchallenge.notifications.emails import send_unread_notifications_email
6 from grandchallenge.profiles.models import UserProfile
7
8
9 @shared_task
10 def send_unread_notification_emails():
11 profiles = (
12 UserProfile.objects.filter(
13 receive_notification_emails=True, user__notification__read=False
14 )
15 .distinct()
16 .prefetch_related("user__notification_set")
17 .order_by("pk")
18 )
19 paginator = Paginator(profiles, 1000)
20
21 for page_nr in paginator.page_range:
22 current_page_profiles = paginator.page(page_nr).object_list
23 current_time = now()
24 recipients = {}
25 for profile in current_page_profiles:
26 unread_notifications = [
27 n
28 for n in profile.user.notification_set.all()
29 if not n.read
30 and (
31 profile.notification_email_last_sent_at is None
32 or n.created > profile.notification_email_last_sent_at
33 )
34 ]
35 if unread_notifications:
36 recipients[profile] = len(unread_notifications)
37 profile.notification_email_last_sent_at = current_time
38
39 UserProfile.objects.bulk_update(
40 current_page_profiles, ["notification_email_last_sent_at"]
41 )
42 send_unread_notifications_email(recipients)
43
[end of app/grandchallenge/notifications/tasks.py]
[start of app/grandchallenge/emails/tasks.py]
1 from celery import shared_task
2 from django.conf import settings
3 from django.contrib.auth import get_user_model
4 from django.contrib.sites.models import Site
5 from django.core.exceptions import ObjectDoesNotExist
6 from django.core.mail import EmailMultiAlternatives, get_connection
7 from django.core.paginator import Paginator
8 from django.template.loader import render_to_string
9 from django.utils.html import strip_tags
10 from django.utils.timezone import now
11
12 from grandchallenge.core.templatetags.bleach import md2html
13 from grandchallenge.emails.models import Email
14 from grandchallenge.emails.utils import SendActionChoices
15 from grandchallenge.subdomains.utils import reverse
16
17
18 def get_receivers(action):
19 if action == SendActionChoices.MAILING_LIST:
20 receivers = (
21 get_user_model()
22 .objects.filter(user_profile__receive_newsletter=True)
23 .order_by("pk")
24 )
25 elif action == SendActionChoices.STAFF:
26 receivers = (
27 get_user_model().objects.filter(is_staff=True).order_by("pk")
28 )
29 elif action == SendActionChoices.CHALLENGE_ADMINS:
30 receivers = (
31 get_user_model()
32 .objects.filter(
33 groups__admins_of_challenge__isnull=False,
34 user_profile__receive_newsletter=True,
35 )
36 .distinct()
37 .order_by("pk")
38 )
39 elif action == SendActionChoices.READER_STUDY_EDITORS:
40 receivers = (
41 get_user_model()
42 .objects.filter(
43 groups__editors_of_readerstudy__isnull=False,
44 user_profile__receive_newsletter=True,
45 )
46 .distinct()
47 .order_by("pk")
48 )
49 elif action == SendActionChoices.ALGORITHM_EDITORS:
50 receivers = (
51 get_user_model()
52 .objects.filter(
53 groups__editors_of_algorithm__isnull=False,
54 user_profile__receive_newsletter=True,
55 )
56 .distinct()
57 .order_by("pk")
58 )
59
60 return receivers
61
62
63 def send_mass_html_email(datatuple):
64 connection = get_connection()
65 messages = []
66 for subject, message, sender, recipient, html in datatuple:
67 email = EmailMultiAlternatives(
68 subject, message, sender, recipient, connection=connection
69 )
70 email.attach_alternative(html, "text/html")
71 messages.append(email)
72 return connection.send_messages(messages)
73
74
75 @shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS["acks-late-micro-short"])
76 def send_bulk_email(action, email_pk):
77 try:
78 email = Email.objects.filter(sent=False).get(pk=email_pk)
79 except ObjectDoesNotExist:
80 return
81 subject = email.subject
82 body = email.body
83 html_body = md2html(body)
84 receivers = get_receivers(action=action)
85 paginator = Paginator(receivers, 100)
86 site = Site.objects.get_current()
87 if email.status_report:
88 start_page = email.status_report["last_processed_batch"]
89 else:
90 start_page = 0
91 for page_nr in paginator.page_range[start_page:]:
92 messages = []
93 for recipient in paginator.page(page_nr).object_list:
94 user = get_user_model().objects.get(pk=recipient.pk)
95 link = reverse(
96 "profile-update", kwargs={"username": user.username}
97 )
98 html_content = render_to_string(
99 "vendor/mailgun_transactional_emails/action.html",
100 {
101 "title": subject,
102 "username": user.username,
103 "content": html_body,
104 "link": link,
105 },
106 )
107 html_content_without_linebreaks = html_content.replace("\n", "")
108 text_content = strip_tags(html_content_without_linebreaks)
109 messages.append(
110 (
111 f"[{site.domain.lower()}] {subject}",
112 text_content,
113 settings.DEFAULT_FROM_EMAIL,
114 [user.email],
115 html_content_without_linebreaks,
116 )
117 )
118 send_mass_html_email(messages)
119 email.status_report = {"last_processed_batch": page_nr}
120 email.save()
121
122 email.sent = True
123 email.sent_at = now()
124 email.status_report = None
125 email.save()
126
[end of app/grandchallenge/emails/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/emails/tasks.py b/app/grandchallenge/emails/tasks.py
--- a/app/grandchallenge/emails/tasks.py
+++ b/app/grandchallenge/emails/tasks.py
@@ -19,12 +19,16 @@
if action == SendActionChoices.MAILING_LIST:
receivers = (
get_user_model()
- .objects.filter(user_profile__receive_newsletter=True)
+ .objects.filter(
+ user_profile__receive_newsletter=True, is_active=True
+ )
.order_by("pk")
)
elif action == SendActionChoices.STAFF:
receivers = (
- get_user_model().objects.filter(is_staff=True).order_by("pk")
+ get_user_model()
+ .objects.filter(is_staff=True, is_active=True)
+ .order_by("pk")
)
elif action == SendActionChoices.CHALLENGE_ADMINS:
receivers = (
@@ -32,6 +36,7 @@
.objects.filter(
groups__admins_of_challenge__isnull=False,
user_profile__receive_newsletter=True,
+ is_active=True,
)
.distinct()
.order_by("pk")
@@ -42,6 +47,7 @@
.objects.filter(
groups__editors_of_readerstudy__isnull=False,
user_profile__receive_newsletter=True,
+ is_active=True,
)
.distinct()
.order_by("pk")
@@ -52,6 +58,7 @@
.objects.filter(
groups__editors_of_algorithm__isnull=False,
user_profile__receive_newsletter=True,
+ is_active=True,
)
.distinct()
.order_by("pk")
diff --git a/app/grandchallenge/notifications/tasks.py b/app/grandchallenge/notifications/tasks.py
--- a/app/grandchallenge/notifications/tasks.py
+++ b/app/grandchallenge/notifications/tasks.py
@@ -10,7 +10,9 @@
def send_unread_notification_emails():
profiles = (
UserProfile.objects.filter(
- receive_notification_emails=True, user__notification__read=False
+ receive_notification_emails=True,
+ user__notification__read=False,
+ user__is_active=True,
)
.distinct()
.prefetch_related("user__notification_set")
| {"golden_diff": "diff --git a/app/grandchallenge/emails/tasks.py b/app/grandchallenge/emails/tasks.py\n--- a/app/grandchallenge/emails/tasks.py\n+++ b/app/grandchallenge/emails/tasks.py\n@@ -19,12 +19,16 @@\n if action == SendActionChoices.MAILING_LIST:\r\n receivers = (\r\n get_user_model()\r\n- .objects.filter(user_profile__receive_newsletter=True)\r\n+ .objects.filter(\r\n+ user_profile__receive_newsletter=True, is_active=True\r\n+ )\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.STAFF:\r\n receivers = (\r\n- get_user_model().objects.filter(is_staff=True).order_by(\"pk\")\r\n+ get_user_model()\r\n+ .objects.filter(is_staff=True, is_active=True)\r\n+ .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.CHALLENGE_ADMINS:\r\n receivers = (\r\n@@ -32,6 +36,7 @@\n .objects.filter(\r\n groups__admins_of_challenge__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n+ is_active=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n@@ -42,6 +47,7 @@\n .objects.filter(\r\n groups__editors_of_readerstudy__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n+ is_active=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n@@ -52,6 +58,7 @@\n .objects.filter(\r\n groups__editors_of_algorithm__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n+ is_active=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\ndiff --git a/app/grandchallenge/notifications/tasks.py b/app/grandchallenge/notifications/tasks.py\n--- a/app/grandchallenge/notifications/tasks.py\n+++ b/app/grandchallenge/notifications/tasks.py\n@@ -10,7 +10,9 @@\n def send_unread_notification_emails():\r\n profiles = (\r\n UserProfile.objects.filter(\r\n- receive_notification_emails=True, user__notification__read=False\r\n+ receive_notification_emails=True,\r\n+ user__notification__read=False,\r\n+ user__is_active=True,\r\n )\r\n .distinct()\r\n .prefetch_related(\"user__notification_set\")\n", "issue": "Notification and newsletter emails should only go to active users\nAt the moment these emails are sent to all users, deactivated or not.\n", "before_files": [{"content": "from celery import shared_task\r\nfrom django.core.paginator import Paginator\r\nfrom django.utils.timezone import now\r\n\r\nfrom grandchallenge.notifications.emails import send_unread_notifications_email\r\nfrom grandchallenge.profiles.models import UserProfile\r\n\r\n\r\n@shared_task\r\ndef send_unread_notification_emails():\r\n profiles = (\r\n UserProfile.objects.filter(\r\n receive_notification_emails=True, user__notification__read=False\r\n )\r\n .distinct()\r\n .prefetch_related(\"user__notification_set\")\r\n .order_by(\"pk\")\r\n )\r\n paginator = Paginator(profiles, 1000)\r\n\r\n for page_nr in paginator.page_range:\r\n current_page_profiles = paginator.page(page_nr).object_list\r\n current_time = now()\r\n recipients = {}\r\n for profile in current_page_profiles:\r\n unread_notifications = [\r\n n\r\n for n in profile.user.notification_set.all()\r\n if not n.read\r\n and (\r\n profile.notification_email_last_sent_at is None\r\n or n.created > profile.notification_email_last_sent_at\r\n )\r\n ]\r\n if unread_notifications:\r\n recipients[profile] = len(unread_notifications)\r\n profile.notification_email_last_sent_at = current_time\r\n\r\n UserProfile.objects.bulk_update(\r\n current_page_profiles, [\"notification_email_last_sent_at\"]\r\n )\r\n send_unread_notifications_email(recipients)\r\n", "path": "app/grandchallenge/notifications/tasks.py"}, {"content": "from celery import shared_task\r\nfrom django.conf 
import settings\r\nfrom django.contrib.auth import get_user_model\r\nfrom django.contrib.sites.models import Site\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nfrom django.core.mail import EmailMultiAlternatives, get_connection\r\nfrom django.core.paginator import Paginator\r\nfrom django.template.loader import render_to_string\r\nfrom django.utils.html import strip_tags\r\nfrom django.utils.timezone import now\r\n\r\nfrom grandchallenge.core.templatetags.bleach import md2html\r\nfrom grandchallenge.emails.models import Email\r\nfrom grandchallenge.emails.utils import SendActionChoices\r\nfrom grandchallenge.subdomains.utils import reverse\r\n\r\n\r\ndef get_receivers(action):\r\n if action == SendActionChoices.MAILING_LIST:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(user_profile__receive_newsletter=True)\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.STAFF:\r\n receivers = (\r\n get_user_model().objects.filter(is_staff=True).order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.CHALLENGE_ADMINS:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(\r\n groups__admins_of_challenge__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.READER_STUDY_EDITORS:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(\r\n groups__editors_of_readerstudy__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n )\r\n elif action == SendActionChoices.ALGORITHM_EDITORS:\r\n receivers = (\r\n get_user_model()\r\n .objects.filter(\r\n groups__editors_of_algorithm__isnull=False,\r\n user_profile__receive_newsletter=True,\r\n )\r\n .distinct()\r\n .order_by(\"pk\")\r\n )\r\n\r\n return receivers\r\n\r\n\r\ndef send_mass_html_email(datatuple):\r\n connection = get_connection()\r\n messages = []\r\n for subject, message, sender, recipient, html in datatuple:\r\n email = EmailMultiAlternatives(\r\n subject, message, sender, recipient, connection=connection\r\n )\r\n email.attach_alternative(html, \"text/html\")\r\n messages.append(email)\r\n return connection.send_messages(messages)\r\n\r\n\r\n@shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS[\"acks-late-micro-short\"])\r\ndef send_bulk_email(action, email_pk):\r\n try:\r\n email = Email.objects.filter(sent=False).get(pk=email_pk)\r\n except ObjectDoesNotExist:\r\n return\r\n subject = email.subject\r\n body = email.body\r\n html_body = md2html(body)\r\n receivers = get_receivers(action=action)\r\n paginator = Paginator(receivers, 100)\r\n site = Site.objects.get_current()\r\n if email.status_report:\r\n start_page = email.status_report[\"last_processed_batch\"]\r\n else:\r\n start_page = 0\r\n for page_nr in paginator.page_range[start_page:]:\r\n messages = []\r\n for recipient in paginator.page(page_nr).object_list:\r\n user = get_user_model().objects.get(pk=recipient.pk)\r\n link = reverse(\r\n \"profile-update\", kwargs={\"username\": user.username}\r\n )\r\n html_content = render_to_string(\r\n \"vendor/mailgun_transactional_emails/action.html\",\r\n {\r\n \"title\": subject,\r\n \"username\": user.username,\r\n \"content\": html_body,\r\n \"link\": link,\r\n },\r\n )\r\n html_content_without_linebreaks = html_content.replace(\"\\n\", \"\")\r\n text_content = strip_tags(html_content_without_linebreaks)\r\n messages.append(\r\n (\r\n f\"[{site.domain.lower()}] {subject}\",\r\n text_content,\r\n settings.DEFAULT_FROM_EMAIL,\r\n [user.email],\r\n 
html_content_without_linebreaks,\r\n )\r\n )\r\n send_mass_html_email(messages)\r\n email.status_report = {\"last_processed_batch\": page_nr}\r\n email.save()\r\n\r\n email.sent = True\r\n email.sent_at = now()\r\n email.status_report = None\r\n email.save()\r\n", "path": "app/grandchallenge/emails/tasks.py"}]} | 2,075 | 502 |
gh_patches_debug_16188 | rasdani/github-patches | git_diff | blakeblackshear__frigate-9634 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Support]: mqtt sometimes fails
### Describe the problem you are having
Sometimes on boot mqtt fails with the following log.
After several reboots it will run just fine
### Version
0.13.1-34fb1c2
### Frigate config file
```yaml
mqtt:
host: 192.168.86.13
user: *
password: *
```
### Relevant log output
```shell
2024-02-03 11:29:28.183720955 [INFO] Preparing Frigate...
2024-02-03 11:29:30.429983288 [INFO] Starting Frigate...
2024-02-03 11:29:43.565715210 [2024-02-03 11:29:43] frigate.app INFO : Starting Frigate (0.13.1-34fb1c2)
2024-02-03 11:29:43.668674989 [2024-02-03 11:29:43] frigate.config WARNING : Customizing more than a detector model path is unsupported.
2024-02-03 11:29:43.693848249 [2024-02-03 11:29:43] peewee_migrate.logs INFO : Starting migrations
2024-02-03 11:29:43.786091779 [2024-02-03 11:29:43] peewee_migrate.logs INFO : There is nothing to migrate
2024-02-03 11:29:44.077802695 [2024-02-03 11:29:44] frigate.app INFO : Recording process started: 465
2024-02-03 11:29:44.082183481 [2024-02-03 11:29:44] frigate.app INFO : go2rtc process pid: 89
2024-02-03 11:29:44.092318107 Exception in thread mqtt:
2024-02-03 11:29:44.092347726 Traceback (most recent call last):
2024-02-03 11:29:44.092350843 File "/usr/lib/python3.9/threading.py", line 954, in _bootstrap_inner
2024-02-03 11:29:44.132000773 self.run()
2024-02-03 11:29:44.132008391 File "/usr/lib/python3.9/threading.py", line 892, in run
2024-02-03 11:29:44.132351154 self._target(*self._args, **self._kwargs)
2024-02-03 11:29:44.132356410 File "/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py", line 3591, in _thread_main
2024-02-03 11:29:44.166476612 self.loop_forever(retry_first_connection=True)
2024-02-03 11:29:44.166483467 File "/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py", line 1756, in loop_forever
2024-02-03 11:29:44.167015600 rc = self._loop(timeout)
2024-02-03 11:29:44.167020085 File "/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py", line 1164, in _loop
2024-02-03 11:29:44.167335718 rc = self.loop_read()
2024-02-03 11:29:44.167339384 File "/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py", line 1556, in loop_read
2024-02-03 11:29:44.167728400 rc = self._packet_read()
2024-02-03 11:29:44.167732996 File "/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py", line 2439, in _packet_read
2024-02-03 11:29:44.168284999 rc = self._packet_handle()
2024-02-03 11:29:44.168305384 File "/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py", line 3033, in _packet_handle
2024-02-03 11:29:44.169013241 return self._handle_publish()
2024-02-03 11:29:44.169016831 File "/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py", line 3327, in _handle_publish
2024-02-03 11:29:44.169718804 self._handle_on_message(message)
2024-02-03 11:29:44.169722152 File "/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py", line 3556, in _handle_on_message
2024-02-03 11:29:44.170469665 callback(self, self._userdata, message)
2024-02-03 11:29:44.170472610 File "/opt/frigate/frigate/comms/mqtt.py", line 110, in on_mqtt_command
2024-02-03 11:29:44.177426729 self._dispatcher(
2024-02-03 11:29:44.177430972 File "/opt/frigate/frigate/comms/dispatcher.py", line 80, in _receive
2024-02-03 11:29:44.179490027 self._camera_settings_handlers[command](camera_name, payload)
2024-02-03 11:29:44.179512034 File "/opt/frigate/frigate/comms/dispatcher.py", line 281, in _on_recordings_command
2024-02-03 11:29:44.179681083 self.publish(f"{camera_name}/recordings/state", payload, retain=True)
2024-02-03 11:29:44.179685405 File "/opt/frigate/frigate/comms/dispatcher.py", line 111, in publish
2024-02-03 11:29:44.179791262 comm.publish(topic, payload, retain)
2024-02-03 11:29:44.179795488 File "/opt/frigate/frigate/comms/ws.py", line 102, in publish
2024-02-03 11:29:44.180243298 self.websocket_server.manager.broadcast(ws_message)
2024-02-03 11:29:44.180247725 AttributeError: 'WebSocketClient' object has no attribute 'websocket_server'
2024-02-03 11:29:44.244791022 [2024-02-03 11:29:44] frigate.app INFO : Output process started: 476
```
### FFprobe output from your camera
```shell
[{"return_code":0,"stderr":"","stdout":{"programs":[],"streams":[{"avg_frame_rate":"30/1","codec_long_name":"H.264/AVC/MPEG-4AVC/MPEG-4part10","height":1440,"width":2560},{"avg_frame_rate":"0/0","codec_long_name":"AAC(AdvancedAudioCoding)"}]}},{"return_code":0,"stderr":"","stdout":{"programs":[],"streams":[{"avg_frame_rate":"20/1","codec_long_name":"H.264/AVC/MPEG-4AVC/MPEG-4part10","height":672,"width":896},{"avg_frame_rate":"0/0","codec_long_name":"AAC(AdvancedAudioCoding)"}]}}]
```
### Frigate stats
_No response_
### Operating system
Debian
### Install method
Docker Compose
### Coral version
CPU (no coral)
### Network connection
Wired
### Camera make and model
Reolink
### Any other information that may be helpful
_No response_
</issue>
<code>
[start of frigate/comms/ws.py]
1 """Websocket communicator."""
2
3 import errno
4 import json
5 import logging
6 import threading
7 from typing import Callable
8 from wsgiref.simple_server import make_server
9
10 from ws4py.server.wsgirefserver import (
11 WebSocketWSGIHandler,
12 WebSocketWSGIRequestHandler,
13 WSGIServer,
14 )
15 from ws4py.server.wsgiutils import WebSocketWSGIApplication
16 from ws4py.websocket import WebSocket as WebSocket_
17
18 from frigate.comms.dispatcher import Communicator
19 from frigate.config import FrigateConfig
20
21 logger = logging.getLogger(__name__)
22
23
24 class WebSocket(WebSocket_):
25 def unhandled_error(self, error):
26 """
27 Handles the unfriendly socket closures on the server side
28 without showing a confusing error message
29 """
30 if hasattr(error, "errno") and error.errno == errno.ECONNRESET:
31 pass
32 else:
33 logging.getLogger("ws4py").exception("Failed to receive data")
34
35
36 class WebSocketClient(Communicator): # type: ignore[misc]
37 """Frigate wrapper for ws client."""
38
39 def __init__(self, config: FrigateConfig) -> None:
40 self.config = config
41
42 def subscribe(self, receiver: Callable) -> None:
43 self._dispatcher = receiver
44 self.start()
45
46 def start(self) -> None:
47 """Start the websocket client."""
48
49 class _WebSocketHandler(WebSocket): # type: ignore[misc]
50 receiver = self._dispatcher
51
52 def received_message(self, message: WebSocket.received_message) -> None:
53 try:
54 json_message = json.loads(message.data.decode("utf-8"))
55 json_message = {
56 "topic": json_message.get("topic"),
57 "payload": json_message.get("payload"),
58 }
59 except Exception:
60 logger.warning(
61 f"Unable to parse websocket message as valid json: {message.data.decode('utf-8')}"
62 )
63 return
64
65 logger.debug(
66 f"Publishing mqtt message from websockets at {json_message['topic']}."
67 )
68 self.receiver(
69 json_message["topic"],
70 json_message["payload"],
71 )
72
73 # start a websocket server on 5002
74 WebSocketWSGIHandler.http_version = "1.1"
75 self.websocket_server = make_server(
76 "127.0.0.1",
77 5002,
78 server_class=WSGIServer,
79 handler_class=WebSocketWSGIRequestHandler,
80 app=WebSocketWSGIApplication(handler_cls=_WebSocketHandler),
81 )
82 self.websocket_server.initialize_websockets_manager()
83 self.websocket_thread = threading.Thread(
84 target=self.websocket_server.serve_forever
85 )
86 self.websocket_thread.start()
87
88 def publish(self, topic: str, payload: str, _: bool) -> None:
89 try:
90 ws_message = json.dumps(
91 {
92 "topic": topic,
93 "payload": payload,
94 }
95 )
96 except Exception:
97 # if the payload can't be decoded don't relay to clients
98 logger.debug(f"payload for {topic} wasn't text. Skipping...")
99 return
100
101 try:
102 self.websocket_server.manager.broadcast(ws_message)
103 except ConnectionResetError:
104 pass
105
106 def stop(self) -> None:
107 self.websocket_server.manager.close_all()
108 self.websocket_server.manager.stop()
109 self.websocket_server.manager.join()
110 self.websocket_server.shutdown()
111 self.websocket_thread.join()
112 logger.info("Exiting websocket client...")
113
[end of frigate/comms/ws.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/frigate/comms/ws.py b/frigate/comms/ws.py
--- a/frigate/comms/ws.py
+++ b/frigate/comms/ws.py
@@ -38,6 +38,7 @@
def __init__(self, config: FrigateConfig) -> None:
self.config = config
+ self.websocket_server = None
def subscribe(self, receiver: Callable) -> None:
self._dispatcher = receiver
@@ -98,6 +99,10 @@
logger.debug(f"payload for {topic} wasn't text. Skipping...")
return
+ if self.websocket_server is None:
+ logger.debug("Skipping message, websocket not connected yet")
+ return
+
try:
self.websocket_server.manager.broadcast(ws_message)
except ConnectionResetError:
| {"golden_diff": "diff --git a/frigate/comms/ws.py b/frigate/comms/ws.py\n--- a/frigate/comms/ws.py\n+++ b/frigate/comms/ws.py\n@@ -38,6 +38,7 @@\n \n def __init__(self, config: FrigateConfig) -> None:\n self.config = config\n+ self.websocket_server = None\n \n def subscribe(self, receiver: Callable) -> None:\n self._dispatcher = receiver\n@@ -98,6 +99,10 @@\n logger.debug(f\"payload for {topic} wasn't text. Skipping...\")\n return\n \n+ if self.websocket_server is None:\n+ logger.debug(\"Skipping message, websocket not connected yet\")\n+ return\n+\n try:\n self.websocket_server.manager.broadcast(ws_message)\n except ConnectionResetError:\n", "issue": "[Support]: mqtt sometimes fails\n### Describe the problem you are having\n\nSometimes on boot mqtt fails with the following log.\r\nAfter several reboots it will run just fine\n\n### Version\n\n0.13.1-34fb1c2\n\n### Frigate config file\n\n```yaml\nmqtt:\r\n host: 192.168.86.13\r\n user: *\r\n password: *\n```\n\n\n### Relevant log output\n\n```shell\n2024-02-03 11:29:28.183720955 [INFO] Preparing Frigate...\r\n2024-02-03 11:29:30.429983288 [INFO] Starting Frigate...\r\n2024-02-03 11:29:43.565715210 [2024-02-03 11:29:43] frigate.app INFO : Starting Frigate (0.13.1-34fb1c2)\r\n2024-02-03 11:29:43.668674989 [2024-02-03 11:29:43] frigate.config WARNING : Customizing more than a detector model path is unsupported.\r\n2024-02-03 11:29:43.693848249 [2024-02-03 11:29:43] peewee_migrate.logs INFO : Starting migrations\r\n2024-02-03 11:29:43.786091779 [2024-02-03 11:29:43] peewee_migrate.logs INFO : There is nothing to migrate\r\n2024-02-03 11:29:44.077802695 [2024-02-03 11:29:44] frigate.app INFO : Recording process started: 465\r\n2024-02-03 11:29:44.082183481 [2024-02-03 11:29:44] frigate.app INFO : go2rtc process pid: 89\r\n2024-02-03 11:29:44.092318107 Exception in thread mqtt:\r\n2024-02-03 11:29:44.092347726 Traceback (most recent call last):\r\n2024-02-03 11:29:44.092350843 File \"/usr/lib/python3.9/threading.py\", line 954, in _bootstrap_inner\r\n2024-02-03 11:29:44.132000773 self.run()\r\n2024-02-03 11:29:44.132008391 File \"/usr/lib/python3.9/threading.py\", line 892, in run\r\n2024-02-03 11:29:44.132351154 self._target(*self._args, **self._kwargs)\r\n2024-02-03 11:29:44.132356410 File \"/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py\", line 3591, in _thread_main\r\n2024-02-03 11:29:44.166476612 self.loop_forever(retry_first_connection=True)\r\n2024-02-03 11:29:44.166483467 File \"/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py\", line 1756, in loop_forever\r\n2024-02-03 11:29:44.167015600 rc = self._loop(timeout)\r\n2024-02-03 11:29:44.167020085 File \"/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py\", line 1164, in _loop\r\n2024-02-03 11:29:44.167335718 rc = self.loop_read()\r\n2024-02-03 11:29:44.167339384 File \"/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py\", line 1556, in loop_read\r\n2024-02-03 11:29:44.167728400 rc = self._packet_read()\r\n2024-02-03 11:29:44.167732996 File \"/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py\", line 2439, in _packet_read\r\n2024-02-03 11:29:44.168284999 rc = self._packet_handle()\r\n2024-02-03 11:29:44.168305384 File \"/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py\", line 3033, in _packet_handle\r\n2024-02-03 11:29:44.169013241 return self._handle_publish()\r\n2024-02-03 11:29:44.169016831 File \"/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py\", line 3327, in _handle_publish\r\n2024-02-03 11:29:44.169718804 
self._handle_on_message(message)\r\n2024-02-03 11:29:44.169722152 File \"/usr/local/lib/python3.9/dist-packages/paho/mqtt/client.py\", line 3556, in _handle_on_message\r\n2024-02-03 11:29:44.170469665 callback(self, self._userdata, message)\r\n2024-02-03 11:29:44.170472610 File \"/opt/frigate/frigate/comms/mqtt.py\", line 110, in on_mqtt_command\r\n2024-02-03 11:29:44.177426729 self._dispatcher(\r\n2024-02-03 11:29:44.177430972 File \"/opt/frigate/frigate/comms/dispatcher.py\", line 80, in _receive\r\n2024-02-03 11:29:44.179490027 self._camera_settings_handlers[command](camera_name, payload)\r\n2024-02-03 11:29:44.179512034 File \"/opt/frigate/frigate/comms/dispatcher.py\", line 281, in _on_recordings_command\r\n2024-02-03 11:29:44.179681083 self.publish(f\"{camera_name}/recordings/state\", payload, retain=True)\r\n2024-02-03 11:29:44.179685405 File \"/opt/frigate/frigate/comms/dispatcher.py\", line 111, in publish\r\n2024-02-03 11:29:44.179791262 comm.publish(topic, payload, retain)\r\n2024-02-03 11:29:44.179795488 File \"/opt/frigate/frigate/comms/ws.py\", line 102, in publish\r\n2024-02-03 11:29:44.180243298 self.websocket_server.manager.broadcast(ws_message)\r\n2024-02-03 11:29:44.180247725 AttributeError: 'WebSocketClient' object has no attribute 'websocket_server'\r\n2024-02-03 11:29:44.244791022 [2024-02-03 11:29:44] frigate.app INFO : Output process started: 476\n```\n\n\n### FFprobe output from your camera\n\n```shell\n[{\"return_code\":0,\"stderr\":\"\",\"stdout\":{\"programs\":[],\"streams\":[{\"avg_frame_rate\":\"30/1\",\"codec_long_name\":\"H.264/AVC/MPEG-4AVC/MPEG-4part10\",\"height\":1440,\"width\":2560},{\"avg_frame_rate\":\"0/0\",\"codec_long_name\":\"AAC(AdvancedAudioCoding)\"}]}},{\"return_code\":0,\"stderr\":\"\",\"stdout\":{\"programs\":[],\"streams\":[{\"avg_frame_rate\":\"20/1\",\"codec_long_name\":\"H.264/AVC/MPEG-4AVC/MPEG-4part10\",\"height\":672,\"width\":896},{\"avg_frame_rate\":\"0/0\",\"codec_long_name\":\"AAC(AdvancedAudioCoding)\"}]}}]\n```\n\n\n### Frigate stats\n\n_No response_\n\n### Operating system\n\nDebian\n\n### Install method\n\nDocker Compose\n\n### Coral version\n\nCPU (no coral)\n\n### Network connection\n\nWired\n\n### Camera make and model\n\nReolink\n\n### Any other information that may be helpful\n\n_No response_\n", "before_files": [{"content": "\"\"\"Websocket communicator.\"\"\"\n\nimport errno\nimport json\nimport logging\nimport threading\nfrom typing import Callable\nfrom wsgiref.simple_server import make_server\n\nfrom ws4py.server.wsgirefserver import (\n WebSocketWSGIHandler,\n WebSocketWSGIRequestHandler,\n WSGIServer,\n)\nfrom ws4py.server.wsgiutils import WebSocketWSGIApplication\nfrom ws4py.websocket import WebSocket as WebSocket_\n\nfrom frigate.comms.dispatcher import Communicator\nfrom frigate.config import FrigateConfig\n\nlogger = logging.getLogger(__name__)\n\n\nclass WebSocket(WebSocket_):\n def unhandled_error(self, error):\n \"\"\"\n Handles the unfriendly socket closures on the server side\n without showing a confusing error message\n \"\"\"\n if hasattr(error, \"errno\") and error.errno == errno.ECONNRESET:\n pass\n else:\n logging.getLogger(\"ws4py\").exception(\"Failed to receive data\")\n\n\nclass WebSocketClient(Communicator): # type: ignore[misc]\n \"\"\"Frigate wrapper for ws client.\"\"\"\n\n def __init__(self, config: FrigateConfig) -> None:\n self.config = config\n\n def subscribe(self, receiver: Callable) -> None:\n self._dispatcher = receiver\n self.start()\n\n def start(self) -> None:\n \"\"\"Start the 
websocket client.\"\"\"\n\n class _WebSocketHandler(WebSocket): # type: ignore[misc]\n receiver = self._dispatcher\n\n def received_message(self, message: WebSocket.received_message) -> None:\n try:\n json_message = json.loads(message.data.decode(\"utf-8\"))\n json_message = {\n \"topic\": json_message.get(\"topic\"),\n \"payload\": json_message.get(\"payload\"),\n }\n except Exception:\n logger.warning(\n f\"Unable to parse websocket message as valid json: {message.data.decode('utf-8')}\"\n )\n return\n\n logger.debug(\n f\"Publishing mqtt message from websockets at {json_message['topic']}.\"\n )\n self.receiver(\n json_message[\"topic\"],\n json_message[\"payload\"],\n )\n\n # start a websocket server on 5002\n WebSocketWSGIHandler.http_version = \"1.1\"\n self.websocket_server = make_server(\n \"127.0.0.1\",\n 5002,\n server_class=WSGIServer,\n handler_class=WebSocketWSGIRequestHandler,\n app=WebSocketWSGIApplication(handler_cls=_WebSocketHandler),\n )\n self.websocket_server.initialize_websockets_manager()\n self.websocket_thread = threading.Thread(\n target=self.websocket_server.serve_forever\n )\n self.websocket_thread.start()\n\n def publish(self, topic: str, payload: str, _: bool) -> None:\n try:\n ws_message = json.dumps(\n {\n \"topic\": topic,\n \"payload\": payload,\n }\n )\n except Exception:\n # if the payload can't be decoded don't relay to clients\n logger.debug(f\"payload for {topic} wasn't text. Skipping...\")\n return\n\n try:\n self.websocket_server.manager.broadcast(ws_message)\n except ConnectionResetError:\n pass\n\n def stop(self) -> None:\n self.websocket_server.manager.close_all()\n self.websocket_server.manager.stop()\n self.websocket_server.manager.join()\n self.websocket_server.shutdown()\n self.websocket_thread.join()\n logger.info(\"Exiting websocket client...\")\n", "path": "frigate/comms/ws.py"}]} | 3,943 | 175 |
gh_patches_debug_7678 | rasdani/github-patches | git_diff | xonsh__xonsh-490 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `edit and execute command` function
bash and zsh (and I'm sure others) have an edit-and-execute command, invoked with Ctrl-e + Ctrl-x, which opens the current contents of the command prompt in the system editor. Edits made to the command are then run on save and quit.
It could be that this functionality is already baked in and I'm missing it? In any case, it's especially handy for longer commands -- and given xonsh's multi-line input capabilities this seems like a good fit.
Thoughts?
</issue>
<code>
[start of xonsh/prompt_toolkit_shell.py]
1 """The prompt_toolkit based xonsh shell"""
2 import os
3 import builtins
4 from warnings import warn
5
6 from prompt_toolkit.shortcuts import get_input
7 from prompt_toolkit.key_binding.manager import KeyBindingManager
8 from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
9 from pygments.token import Token
10 from pygments.style import Style
11
12 from xonsh.base_shell import BaseShell
13 from xonsh.tools import format_prompt_for_prompt_toolkit
14 from xonsh.prompt_toolkit_completer import PromptToolkitCompleter
15 from xonsh.prompt_toolkit_history import LimitedFileHistory
16 from xonsh.prompt_toolkit_key_bindings import load_xonsh_bindings
17
18
19 def setup_history():
20 """Creates history object."""
21 env = builtins.__xonsh_env__
22 hfile = env.get('XONSH_HISTORY_FILE')
23 history = LimitedFileHistory()
24 try:
25 history.read_history_file(hfile)
26 except PermissionError:
27 warn('do not have read permissions for ' + hfile, RuntimeWarning)
28 return history
29
30
31 def teardown_history(history):
32 """Tears down the history object."""
33 env = builtins.__xonsh_env__
34 hsize = env.get('XONSH_HISTORY_SIZE')[0]
35 hfile = env.get('XONSH_HISTORY_FILE')
36 try:
37 history.save_history_to_file(hfile, hsize)
38 except PermissionError:
39 warn('do not have write permissions for ' + hfile, RuntimeWarning)
40
41
42 class PromptToolkitShell(BaseShell):
43 """The xonsh shell."""
44
45 def __init__(self, **kwargs):
46 super().__init__(**kwargs)
47 self.history = setup_history()
48 self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx)
49 self.key_bindings_manager = KeyBindingManager(
50 enable_auto_suggest_bindings=True,
51 enable_search=True, enable_abort_and_exit_bindings=True)
52 load_xonsh_bindings(self.key_bindings_manager)
53
54 def __del__(self):
55 if self.history is not None:
56 teardown_history(self.history)
57
58 def cmdloop(self, intro=None):
59 """Enters a loop that reads and execute input from user."""
60 if intro:
61 print(intro)
62 _auto_suggest = AutoSuggestFromHistory()
63 while not builtins.__xonsh_exit__:
64 try:
65 token_func, style_cls = self._get_prompt_tokens_and_style()
66 mouse_support = builtins.__xonsh_env__.get('MOUSE_SUPPORT')
67 if builtins.__xonsh_env__.get('AUTO_SUGGEST'):
68 auto_suggest = _auto_suggest
69 else:
70 auto_suggest = None
71 completions_display = builtins.__xonsh_env__.get('COMPLETIONS_DISPLAY')
72 multicolumn = (completions_display == 'multi')
73 completer = None if completions_display == 'none' else self.pt_completer
74 line = get_input(
75 mouse_support=mouse_support,
76 auto_suggest=auto_suggest,
77 get_prompt_tokens=token_func,
78 style=style_cls,
79 completer=completer,
80 history=self.history,
81 key_bindings_registry=self.key_bindings_manager.registry,
82 display_completions_in_columns=multicolumn)
83 if not line:
84 self.emptyline()
85 else:
86 line = self.precmd(line)
87 self.default(line)
88 except KeyboardInterrupt:
89 self.reset_buffer()
90 except EOFError:
91 break
92
93 def _get_prompt_tokens_and_style(self):
94 """Returns function to pass as prompt to prompt_toolkit."""
95 token_names, cstyles, strings = format_prompt_for_prompt_toolkit(self.prompt)
96 tokens = [getattr(Token, n) for n in token_names]
97
98 def get_tokens(cli):
99 return list(zip(tokens, strings))
100
101 class CustomStyle(Style):
102 styles = {
103 Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',
104 Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',
105 Token.Menu.Completions.ProgressButton: 'bg:#003333',
106 Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',
107 Token.AutoSuggestion: '#666666',
108 Token.Aborted: '#888888',
109 }
110 # update with the prompt styles
111 styles.update({t: s for (t, s) in zip(tokens, cstyles)})
112 # Update with with any user styles
113 userstyle = builtins.__xonsh_env__.get('PROMPT_TOOLKIT_STYLES')
114 if userstyle is not None:
115 styles.update(userstyle)
116
117 return get_tokens, CustomStyle
118
[end of xonsh/prompt_toolkit_shell.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xonsh/prompt_toolkit_shell.py b/xonsh/prompt_toolkit_shell.py
--- a/xonsh/prompt_toolkit_shell.py
+++ b/xonsh/prompt_toolkit_shell.py
@@ -48,6 +48,7 @@
self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx)
self.key_bindings_manager = KeyBindingManager(
enable_auto_suggest_bindings=True,
+ enable_open_in_editor=True,
enable_search=True, enable_abort_and_exit_bindings=True)
load_xonsh_bindings(self.key_bindings_manager)
| {"golden_diff": "diff --git a/xonsh/prompt_toolkit_shell.py b/xonsh/prompt_toolkit_shell.py\n--- a/xonsh/prompt_toolkit_shell.py\n+++ b/xonsh/prompt_toolkit_shell.py\n@@ -48,6 +48,7 @@\n self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx)\n self.key_bindings_manager = KeyBindingManager(\n enable_auto_suggest_bindings=True,\n+ enable_open_in_editor=True,\n enable_search=True, enable_abort_and_exit_bindings=True)\n load_xonsh_bindings(self.key_bindings_manager)\n", "issue": "Add 'edit and execute command` function\nbash and zsh (and I'm sure others) have an edit and execute command that is invoked with Ctrl-e + Ctrl-x which opens the current contents of the command prompt in the system editor. Edits made to the command are then run on save and quit. \n\nIt could be this functionality is already baked in and I'm missing it? In any case, it's especially handy for longer commands -- and given xonsh's multi-line input capabilities this seems like a good fit.\n\nThoughts?\n\n", "before_files": [{"content": "\"\"\"The prompt_toolkit based xonsh shell\"\"\"\nimport os\nimport builtins\nfrom warnings import warn\n\nfrom prompt_toolkit.shortcuts import get_input\nfrom prompt_toolkit.key_binding.manager import KeyBindingManager\nfrom prompt_toolkit.auto_suggest import AutoSuggestFromHistory\nfrom pygments.token import Token\nfrom pygments.style import Style\n\nfrom xonsh.base_shell import BaseShell\nfrom xonsh.tools import format_prompt_for_prompt_toolkit\nfrom xonsh.prompt_toolkit_completer import PromptToolkitCompleter\nfrom xonsh.prompt_toolkit_history import LimitedFileHistory\nfrom xonsh.prompt_toolkit_key_bindings import load_xonsh_bindings\n\n\ndef setup_history():\n \"\"\"Creates history object.\"\"\"\n env = builtins.__xonsh_env__\n hfile = env.get('XONSH_HISTORY_FILE')\n history = LimitedFileHistory()\n try:\n history.read_history_file(hfile)\n except PermissionError:\n warn('do not have read permissions for ' + hfile, RuntimeWarning)\n return history\n\n\ndef teardown_history(history):\n \"\"\"Tears down the history object.\"\"\"\n env = builtins.__xonsh_env__\n hsize = env.get('XONSH_HISTORY_SIZE')[0]\n hfile = env.get('XONSH_HISTORY_FILE')\n try:\n history.save_history_to_file(hfile, hsize)\n except PermissionError:\n warn('do not have write permissions for ' + hfile, RuntimeWarning)\n\n\nclass PromptToolkitShell(BaseShell):\n \"\"\"The xonsh shell.\"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.history = setup_history()\n self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx)\n self.key_bindings_manager = KeyBindingManager(\n enable_auto_suggest_bindings=True,\n enable_search=True, enable_abort_and_exit_bindings=True)\n load_xonsh_bindings(self.key_bindings_manager)\n\n def __del__(self):\n if self.history is not None:\n teardown_history(self.history)\n\n def cmdloop(self, intro=None):\n \"\"\"Enters a loop that reads and execute input from user.\"\"\"\n if intro:\n print(intro)\n _auto_suggest = AutoSuggestFromHistory()\n while not builtins.__xonsh_exit__:\n try:\n token_func, style_cls = self._get_prompt_tokens_and_style()\n mouse_support = builtins.__xonsh_env__.get('MOUSE_SUPPORT')\n if builtins.__xonsh_env__.get('AUTO_SUGGEST'):\n auto_suggest = _auto_suggest\n else:\n auto_suggest = None\n completions_display = builtins.__xonsh_env__.get('COMPLETIONS_DISPLAY')\n multicolumn = (completions_display == 'multi')\n completer = None if completions_display == 'none' else self.pt_completer\n line = get_input(\n 
mouse_support=mouse_support,\n auto_suggest=auto_suggest,\n get_prompt_tokens=token_func,\n style=style_cls,\n completer=completer,\n history=self.history,\n key_bindings_registry=self.key_bindings_manager.registry,\n display_completions_in_columns=multicolumn)\n if not line:\n self.emptyline()\n else:\n line = self.precmd(line)\n self.default(line)\n except KeyboardInterrupt:\n self.reset_buffer()\n except EOFError:\n break\n\n def _get_prompt_tokens_and_style(self):\n \"\"\"Returns function to pass as prompt to prompt_toolkit.\"\"\"\n token_names, cstyles, strings = format_prompt_for_prompt_toolkit(self.prompt)\n tokens = [getattr(Token, n) for n in token_names]\n\n def get_tokens(cli):\n return list(zip(tokens, strings))\n\n class CustomStyle(Style):\n styles = {\n Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',\n Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',\n Token.Menu.Completions.ProgressButton: 'bg:#003333',\n Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',\n Token.AutoSuggestion: '#666666',\n Token.Aborted: '#888888',\n }\n # update with the prompt styles\n styles.update({t: s for (t, s) in zip(tokens, cstyles)})\n # Update with with any user styles\n userstyle = builtins.__xonsh_env__.get('PROMPT_TOOLKIT_STYLES')\n if userstyle is not None:\n styles.update(userstyle)\n\n return get_tokens, CustomStyle\n", "path": "xonsh/prompt_toolkit_shell.py"}]} | 1,906 | 128 |
gh_patches_debug_6573 | rasdani/github-patches | git_diff | OCA__bank-payment-44 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Code error in account_payment_sale
I'm reporting a bug in my own code: in the 7.0 branch, in account_payment_sale/model/sale.py line 41, we have the following code:
partner.customer_payment_mode.id or False,
We should NOT have a comma at the end of this line. When we run the module in Odoo 8.0, it will crash when creating a sale order via a call to create, because addons/sale/sale.py in Odoo 8.0 silently runs the on_change and then tries to create {"payment_mode_id: (False,)}, which leads to a crash
</issue>
<code>
[start of account_payment_sale/model/sale.py]
1 # -*- encoding: utf-8 -*-
2 ##############################################################################
3 #
4 # Account Payment Sale module for OpenERP
5 # Copyright (C) 2014 Akretion (http://www.akretion.com)
6 # @author Alexis de Lattre <[email protected]>
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Affero General Public License as
10 # published by the Free Software Foundation, either version 3 of the
11 # License, or (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Affero General Public License for more details.
17 #
18 # You should have received a copy of the GNU Affero General Public License
19 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #
21 ##############################################################################
22
23 from openerp.osv import orm, fields
24
25
26 class sale_order(orm.Model):
27 _inherit = "sale.order"
28
29 _columns = {
30 'payment_mode_id': fields.many2one(
31 'payment.mode', 'Payment Mode'),
32 }
33
34 def onchange_partner_id(self, cr, uid, ids, part, context=None):
35 res = super(sale_order, self).onchange_partner_id(
36 cr, uid, ids, part, context=context)
37 if part:
38 partner = self.pool['res.partner'].browse(
39 cr, uid, part, context=context)
40 res['value']['payment_mode_id'] = \
41 partner.customer_payment_mode.id or False,
42 else:
43 res['value']['payment_mode_id'] = False
44 return res
45
46 def _prepare_invoice(self, cr, uid, order, lines, context=None):
47 """Copy bank partner from sale order to invoice"""
48 invoice_vals = super(sale_order, self)._prepare_invoice(
49 cr, uid, order, lines, context=context)
50 invoice_vals.update({
51 'payment_mode_id': order.payment_mode_id.id or False,
52 'partner_bank_id': order.payment_mode_id and
53 order.payment_mode_id.bank_id.id or False,
54 })
55 return invoice_vals
56
[end of account_payment_sale/model/sale.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/account_payment_sale/model/sale.py b/account_payment_sale/model/sale.py
--- a/account_payment_sale/model/sale.py
+++ b/account_payment_sale/model/sale.py
@@ -38,7 +38,7 @@
partner = self.pool['res.partner'].browse(
cr, uid, part, context=context)
res['value']['payment_mode_id'] = \
- partner.customer_payment_mode.id or False,
+ partner.customer_payment_mode.id or False
else:
res['value']['payment_mode_id'] = False
return res
| {"golden_diff": "diff --git a/account_payment_sale/model/sale.py b/account_payment_sale/model/sale.py\n--- a/account_payment_sale/model/sale.py\n+++ b/account_payment_sale/model/sale.py\n@@ -38,7 +38,7 @@\n partner = self.pool['res.partner'].browse(\n cr, uid, part, context=context)\n res['value']['payment_mode_id'] = \\\n- partner.customer_payment_mode.id or False,\n+ partner.customer_payment_mode.id or False\n else:\n res['value']['payment_mode_id'] = False\n return res\n", "issue": "Code error in account_payment_sale\nI'm reporting a bug in my own code : in the 7.0 branch, in account_payment_sale/model/sale.py line 41, we have the following code:\n\n partner.customer_payment_mode.id or False,\n\nWe should NOT have a coma at the end of this line. When we run the module in Odoo 8.0, it will crash when creating a sale order via a call to create because addons/sale/sale.py in odoo 8.0 silently plays the on_change and then it will try to create {\"payment_mode_id: (False,)}, when leads to a crash\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Account Payment Sale module for OpenERP\n# Copyright (C) 2014 Akretion (http://www.akretion.com)\n# @author Alexis de Lattre <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import orm, fields\n\n\nclass sale_order(orm.Model):\n _inherit = \"sale.order\"\n\n _columns = {\n 'payment_mode_id': fields.many2one(\n 'payment.mode', 'Payment Mode'),\n }\n\n def onchange_partner_id(self, cr, uid, ids, part, context=None):\n res = super(sale_order, self).onchange_partner_id(\n cr, uid, ids, part, context=context)\n if part:\n partner = self.pool['res.partner'].browse(\n cr, uid, part, context=context)\n res['value']['payment_mode_id'] = \\\n partner.customer_payment_mode.id or False,\n else:\n res['value']['payment_mode_id'] = False\n return res\n\n def _prepare_invoice(self, cr, uid, order, lines, context=None):\n \"\"\"Copy bank partner from sale order to invoice\"\"\"\n invoice_vals = super(sale_order, self)._prepare_invoice(\n cr, uid, order, lines, context=context)\n invoice_vals.update({\n 'payment_mode_id': order.payment_mode_id.id or False,\n 'partner_bank_id': order.payment_mode_id and\n order.payment_mode_id.bank_id.id or False,\n })\n return invoice_vals\n", "path": "account_payment_sale/model/sale.py"}]} | 1,268 | 123 |
gh_patches_debug_12839 | rasdani/github-patches | git_diff | inventree__InvenTree-3547 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django not available on startup
@matmair bad news. This has broken *stuff*..
https://github.com/inventree/InvenTree/runs/7837878419?check_suite_focus=true
Basically, the `invoke` script makes use of this file and must run *before* django is installed. Classic chicken and egg.
I think we might have to revert this one? LMK if you can see a cleaner solution; otherwise I'll rewind this.
_Originally posted by @SchrodingersGat in https://github.com/inventree/InvenTree/issues/3536#issuecomment-1214999583_
</issue>
<code>
[start of InvenTree/InvenTree/config.py]
1 """Helper functions for loading InvenTree configuration options."""
2
3 import logging
4 import os
5 import random
6 import shutil
7 import string
8 from pathlib import Path
9
10 from django.contrib.staticfiles.storage import StaticFilesStorage
11 from django.core.files.storage import default_storage
12
13 import yaml
14
15 logger = logging.getLogger('inventree')
16
17
18 def is_true(x):
19 """Shortcut function to determine if a value "looks" like a boolean"""
20 return str(x).strip().lower() in ['1', 'y', 'yes', 't', 'true', 'on']
21
22
23 def get_base_dir() -> Path:
24 """Returns the base (top-level) InvenTree directory."""
25 return Path(__file__).parent.parent.resolve()
26
27
28 def get_config_file(create=True) -> Path:
29 """Returns the path of the InvenTree configuration file.
30
31 Note: It will be created it if does not already exist!
32 """
33 base_dir = get_base_dir()
34
35 cfg_filename = os.getenv('INVENTREE_CONFIG_FILE')
36
37 if cfg_filename:
38 cfg_filename = Path(cfg_filename.strip()).resolve()
39 else:
40 # Config file is *not* specified - use the default
41 cfg_filename = base_dir.joinpath('config.yaml').resolve()
42
43 if not cfg_filename.exists() and create:
44 print("InvenTree configuration file 'config.yaml' not found - creating default file")
45
46 cfg_template = base_dir.joinpath("config_template.yaml")
47 shutil.copyfile(cfg_template, cfg_filename)
48 print(f"Created config file {cfg_filename}")
49
50 return cfg_filename
51
52
53 def load_config_data() -> map:
54 """Load configuration data from the config file."""
55
56 cfg_file = get_config_file()
57
58 with open(cfg_file, 'r') as cfg:
59 data = yaml.safe_load(cfg)
60
61 return data
62
63
64 def get_setting(env_var=None, config_key=None, default_value=None):
65 """Helper function for retrieving a configuration setting value.
66
67 - First preference is to look for the environment variable
68 - Second preference is to look for the value of the settings file
69 - Third preference is the default value
70
71 Arguments:
72 env_var: Name of the environment variable e.g. 'INVENTREE_STATIC_ROOT'
73 config_key: Key to lookup in the configuration file
74 default_value: Value to return if first two options are not provided
75
76 """
77
78 # First, try to load from the environment variables
79 if env_var is not None:
80 val = os.getenv(env_var, None)
81
82 if val is not None:
83 return val
84
85 # Next, try to load from configuration file
86 if config_key is not None:
87 cfg_data = load_config_data()
88
89 result = None
90
91 # Hack to allow 'path traversal' in configuration file
92 for key in config_key.strip().split('.'):
93
94 if type(cfg_data) is not dict or key not in cfg_data:
95 result = None
96 break
97
98 result = cfg_data[key]
99 cfg_data = cfg_data[key]
100
101 if result is not None:
102 return result
103
104 # Finally, return the default value
105 return default_value
106
107
108 def get_boolean_setting(env_var=None, config_key=None, default_value=False):
109 """Helper function for retreiving a boolean configuration setting"""
110
111 return is_true(get_setting(env_var, config_key, default_value))
112
113
114 def get_media_dir(create=True):
115 """Return the absolute path for the 'media' directory (where uploaded files are stored)"""
116
117 md = get_setting('INVENTREE_MEDIA_ROOT', 'media_root')
118
119 if not md:
120 raise FileNotFoundError('INVENTREE_MEDIA_ROOT not specified')
121
122 md = Path(md).resolve()
123
124 if create:
125 md.mkdir(parents=True, exist_ok=True)
126
127 return md
128
129
130 def get_static_dir(create=True):
131 """Return the absolute path for the 'static' directory (where static files are stored)"""
132
133 sd = get_setting('INVENTREE_STATIC_ROOT', 'static_root')
134
135 if not sd:
136 raise FileNotFoundError('INVENTREE_STATIC_ROOT not specified')
137
138 sd = Path(sd).resolve()
139
140 if create:
141 sd.mkdir(parents=True, exist_ok=True)
142
143 return sd
144
145
146 def get_plugin_file():
147 """Returns the path of the InvenTree plugins specification file.
148
149 Note: It will be created if it does not already exist!
150 """
151
152 # Check if the plugin.txt file (specifying required plugins) is specified
153 plugin_file = get_setting('INVENTREE_PLUGIN_FILE', 'plugin_file')
154
155 if not plugin_file:
156 # If not specified, look in the same directory as the configuration file
157 config_dir = get_config_file().parent
158 plugin_file = config_dir.joinpath('plugins.txt')
159 else:
160 # Make sure we are using a modern Path object
161 plugin_file = Path(plugin_file)
162
163 if not plugin_file.exists():
164 logger.warning("Plugin configuration file does not exist - creating default file")
165 logger.info(f"Creating plugin file at '{plugin_file}'")
166
167 # If opening the file fails (no write permission, for example), then this will throw an error
168 plugin_file.write_text("# InvenTree Plugins (uses PIP framework to install)\n\n")
169
170 return plugin_file
171
172
173 def get_secret_key():
174 """Return the secret key value which will be used by django.
175
176 Following options are tested, in descending order of preference:
177
178 A) Check for environment variable INVENTREE_SECRET_KEY => Use raw key data
179 B) Check for environment variable INVENTREE_SECRET_KEY_FILE => Load key data from file
180 C) Look for default key file "secret_key.txt"
181 D) Create "secret_key.txt" if it does not exist
182 """
183
184 # Look for environment variable
185 if secret_key := get_setting('INVENTREE_SECRET_KEY', 'secret_key'):
186 logger.info("SECRET_KEY loaded by INVENTREE_SECRET_KEY") # pragma: no cover
187 return secret_key
188
189 # Look for secret key file
190 if secret_key_file := get_setting('INVENTREE_SECRET_KEY_FILE', 'secret_key_file'):
191 secret_key_file = Path(secret_key_file).resolve()
192 else:
193 # Default location for secret key file
194 secret_key_file = get_base_dir().joinpath("secret_key.txt").resolve()
195
196 if not secret_key_file.exists():
197 logger.info(f"Generating random key file at '{secret_key_file}'")
198
199 # Create a random key file
200 options = string.digits + string.ascii_letters + string.punctuation
201 key = ''.join([random.choice(options) for i in range(100)])
202 secret_key_file.write_text(key)
203
204 logger.info(f"Loading SECRET_KEY from '{secret_key_file}'")
205
206 key_data = secret_key_file.read_text().strip()
207
208 return key_data
209
210
211 def get_custom_file(env_ref: str, conf_ref: str, log_ref: str, lookup_media: bool = False):
212 """Returns the checked path to a custom file.
213
214 Set lookup_media to True to also search in the media folder.
215 """
216 value = get_setting(env_ref, conf_ref, None)
217
218 if not value:
219 return None
220
221 static_storage = StaticFilesStorage()
222
223 if static_storage.exists(value):
224 logger.info(f"Loading {log_ref} from static directory: {value}")
225 elif lookup_media and default_storage.exists(value):
226 logger.info(f"Loading {log_ref} from media directory: {value}")
227 else:
228 add_dir_str = ' or media' if lookup_media else ''
229 logger.warning(f"The {log_ref} file '{value}' could not be found in the static{add_dir_str} directories")
230 value = False
231
232 return value
233
[end of InvenTree/InvenTree/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/InvenTree/InvenTree/config.py b/InvenTree/InvenTree/config.py
--- a/InvenTree/InvenTree/config.py
+++ b/InvenTree/InvenTree/config.py
@@ -7,9 +7,6 @@
import string
from pathlib import Path
-from django.contrib.staticfiles.storage import StaticFilesStorage
-from django.core.files.storage import default_storage
-
import yaml
logger = logging.getLogger('inventree')
@@ -213,6 +210,9 @@
Set lookup_media to True to also search in the media folder.
"""
+ from django.contrib.staticfiles.storage import StaticFilesStorage
+ from django.core.files.storage import default_storage
+
value = get_setting(env_ref, conf_ref, None)
if not value:
| {"golden_diff": "diff --git a/InvenTree/InvenTree/config.py b/InvenTree/InvenTree/config.py\n--- a/InvenTree/InvenTree/config.py\n+++ b/InvenTree/InvenTree/config.py\n@@ -7,9 +7,6 @@\n import string\n from pathlib import Path\n \n-from django.contrib.staticfiles.storage import StaticFilesStorage\n-from django.core.files.storage import default_storage\n-\n import yaml\n \n logger = logging.getLogger('inventree')\n@@ -213,6 +210,9 @@\n \n Set lookup_media to True to also search in the media folder.\n \"\"\"\n+ from django.contrib.staticfiles.storage import StaticFilesStorage\n+ from django.core.files.storage import default_storage\n+\n value = get_setting(env_ref, conf_ref, None)\n \n if not value:\n", "issue": "Django not available on startup\n@matmair bad news. This has broken *stuff*..\r\n\r\nhttps://github.com/inventree/InvenTree/runs/7837878419?check_suite_focus=true\r\n\r\nBasically, the `invoke` script makes use of this file and must run *before* django is installed. Classic chicken and egg.\r\n\r\nI think we might have to revert this one? LMK if you can see a cleaner solution otherwise I'll rewind this.\r\n\r\n_Originally posted by @SchrodingersGat in https://github.com/inventree/InvenTree/issues/3536#issuecomment-1214999583_\n", "before_files": [{"content": "\"\"\"Helper functions for loading InvenTree configuration options.\"\"\"\n\nimport logging\nimport os\nimport random\nimport shutil\nimport string\nfrom pathlib import Path\n\nfrom django.contrib.staticfiles.storage import StaticFilesStorage\nfrom django.core.files.storage import default_storage\n\nimport yaml\n\nlogger = logging.getLogger('inventree')\n\n\ndef is_true(x):\n \"\"\"Shortcut function to determine if a value \"looks\" like a boolean\"\"\"\n return str(x).strip().lower() in ['1', 'y', 'yes', 't', 'true', 'on']\n\n\ndef get_base_dir() -> Path:\n \"\"\"Returns the base (top-level) InvenTree directory.\"\"\"\n return Path(__file__).parent.parent.resolve()\n\n\ndef get_config_file(create=True) -> Path:\n \"\"\"Returns the path of the InvenTree configuration file.\n\n Note: It will be created it if does not already exist!\n \"\"\"\n base_dir = get_base_dir()\n\n cfg_filename = os.getenv('INVENTREE_CONFIG_FILE')\n\n if cfg_filename:\n cfg_filename = Path(cfg_filename.strip()).resolve()\n else:\n # Config file is *not* specified - use the default\n cfg_filename = base_dir.joinpath('config.yaml').resolve()\n\n if not cfg_filename.exists() and create:\n print(\"InvenTree configuration file 'config.yaml' not found - creating default file\")\n\n cfg_template = base_dir.joinpath(\"config_template.yaml\")\n shutil.copyfile(cfg_template, cfg_filename)\n print(f\"Created config file {cfg_filename}\")\n\n return cfg_filename\n\n\ndef load_config_data() -> map:\n \"\"\"Load configuration data from the config file.\"\"\"\n\n cfg_file = get_config_file()\n\n with open(cfg_file, 'r') as cfg:\n data = yaml.safe_load(cfg)\n\n return data\n\n\ndef get_setting(env_var=None, config_key=None, default_value=None):\n \"\"\"Helper function for retrieving a configuration setting value.\n\n - First preference is to look for the environment variable\n - Second preference is to look for the value of the settings file\n - Third preference is the default value\n\n Arguments:\n env_var: Name of the environment variable e.g. 
'INVENTREE_STATIC_ROOT'\n config_key: Key to lookup in the configuration file\n default_value: Value to return if first two options are not provided\n\n \"\"\"\n\n # First, try to load from the environment variables\n if env_var is not None:\n val = os.getenv(env_var, None)\n\n if val is not None:\n return val\n\n # Next, try to load from configuration file\n if config_key is not None:\n cfg_data = load_config_data()\n\n result = None\n\n # Hack to allow 'path traversal' in configuration file\n for key in config_key.strip().split('.'):\n\n if type(cfg_data) is not dict or key not in cfg_data:\n result = None\n break\n\n result = cfg_data[key]\n cfg_data = cfg_data[key]\n\n if result is not None:\n return result\n\n # Finally, return the default value\n return default_value\n\n\ndef get_boolean_setting(env_var=None, config_key=None, default_value=False):\n \"\"\"Helper function for retreiving a boolean configuration setting\"\"\"\n\n return is_true(get_setting(env_var, config_key, default_value))\n\n\ndef get_media_dir(create=True):\n \"\"\"Return the absolute path for the 'media' directory (where uploaded files are stored)\"\"\"\n\n md = get_setting('INVENTREE_MEDIA_ROOT', 'media_root')\n\n if not md:\n raise FileNotFoundError('INVENTREE_MEDIA_ROOT not specified')\n\n md = Path(md).resolve()\n\n if create:\n md.mkdir(parents=True, exist_ok=True)\n\n return md\n\n\ndef get_static_dir(create=True):\n \"\"\"Return the absolute path for the 'static' directory (where static files are stored)\"\"\"\n\n sd = get_setting('INVENTREE_STATIC_ROOT', 'static_root')\n\n if not sd:\n raise FileNotFoundError('INVENTREE_STATIC_ROOT not specified')\n\n sd = Path(sd).resolve()\n\n if create:\n sd.mkdir(parents=True, exist_ok=True)\n\n return sd\n\n\ndef get_plugin_file():\n \"\"\"Returns the path of the InvenTree plugins specification file.\n\n Note: It will be created if it does not already exist!\n \"\"\"\n\n # Check if the plugin.txt file (specifying required plugins) is specified\n plugin_file = get_setting('INVENTREE_PLUGIN_FILE', 'plugin_file')\n\n if not plugin_file:\n # If not specified, look in the same directory as the configuration file\n config_dir = get_config_file().parent\n plugin_file = config_dir.joinpath('plugins.txt')\n else:\n # Make sure we are using a modern Path object\n plugin_file = Path(plugin_file)\n\n if not plugin_file.exists():\n logger.warning(\"Plugin configuration file does not exist - creating default file\")\n logger.info(f\"Creating plugin file at '{plugin_file}'\")\n\n # If opening the file fails (no write permission, for example), then this will throw an error\n plugin_file.write_text(\"# InvenTree Plugins (uses PIP framework to install)\\n\\n\")\n\n return plugin_file\n\n\ndef get_secret_key():\n \"\"\"Return the secret key value which will be used by django.\n\n Following options are tested, in descending order of preference:\n\n A) Check for environment variable INVENTREE_SECRET_KEY => Use raw key data\n B) Check for environment variable INVENTREE_SECRET_KEY_FILE => Load key data from file\n C) Look for default key file \"secret_key.txt\"\n D) Create \"secret_key.txt\" if it does not exist\n \"\"\"\n\n # Look for environment variable\n if secret_key := get_setting('INVENTREE_SECRET_KEY', 'secret_key'):\n logger.info(\"SECRET_KEY loaded by INVENTREE_SECRET_KEY\") # pragma: no cover\n return secret_key\n\n # Look for secret key file\n if secret_key_file := get_setting('INVENTREE_SECRET_KEY_FILE', 'secret_key_file'):\n secret_key_file = Path(secret_key_file).resolve()\n 
else:\n # Default location for secret key file\n secret_key_file = get_base_dir().joinpath(\"secret_key.txt\").resolve()\n\n if not secret_key_file.exists():\n logger.info(f\"Generating random key file at '{secret_key_file}'\")\n\n # Create a random key file\n options = string.digits + string.ascii_letters + string.punctuation\n key = ''.join([random.choice(options) for i in range(100)])\n secret_key_file.write_text(key)\n\n logger.info(f\"Loading SECRET_KEY from '{secret_key_file}'\")\n\n key_data = secret_key_file.read_text().strip()\n\n return key_data\n\n\ndef get_custom_file(env_ref: str, conf_ref: str, log_ref: str, lookup_media: bool = False):\n \"\"\"Returns the checked path to a custom file.\n\n Set lookup_media to True to also search in the media folder.\n \"\"\"\n value = get_setting(env_ref, conf_ref, None)\n\n if not value:\n return None\n\n static_storage = StaticFilesStorage()\n\n if static_storage.exists(value):\n logger.info(f\"Loading {log_ref} from static directory: {value}\")\n elif lookup_media and default_storage.exists(value):\n logger.info(f\"Loading {log_ref} from media directory: {value}\")\n else:\n add_dir_str = ' or media' if lookup_media else ''\n logger.warning(f\"The {log_ref} file '{value}' could not be found in the static{add_dir_str} directories\")\n value = False\n\n return value\n", "path": "InvenTree/InvenTree/config.py"}]} | 2,955 | 178 |
gh_patches_debug_44794 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-1024 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document reload causes clash between references with the same id but different collections
Hi there,
Let's say I have a document with two `ReferenceField`s pointing to two different collections that use incremental ids.
Since the ids are incremental and not UUIDs, the same id can point to a valid document in both collections (pretty obvious for id `1` ^^).
That being said, `reload` handles reference reloading around here: https://github.com/MongoEngine/mongoengine/blob/d4a8ae5743fe335ac4cea957537be34de45a440f/mongoengine/dereference.py#L77
The idea is (if I'm not wrong) to collect the document's references in a dict, something like this:
``` python
>>> print(self.reference_map)
{
MyDocumentA: [DBref('MyDocumentA', 'id-1'), DBref('MyDocumentA', 'id-2') ],
MyDocumentB: [DBref('MyDocumentB', 'id-1'),]
}
```
Then this dict is used to retrieve all those documents from the database:
``` python
>>> print(self.object_map)
{
'id-1': <MyDocumentB: MyDocumentB object>,
'id-2': <MyDocumentA: MyDocumentA object>
}
```
As you can see, since object_map only uses the id as key, the document `MyDocumentA` with key 'id-1' has been shadowed by the `MyDocumentB` with the same id!
Thus, the final `_attach_objects`, which aims at refreshing the main document's references, will replace a `MyDocumentA` reference with a `MyDocumentB`...
I think that to solve this, we should change the key in object_map from the bare id to a `DBRef`, so the check would be on both collection and id.
What do you think? Given I've spent some time tracking this nasty bug, I think I can provide a fix myself if you ask for it ;-)
</issue>
<code>
[start of mongoengine/dereference.py]
1 from bson import DBRef, SON
2
3 from base import (
4 BaseDict, BaseList, EmbeddedDocumentList,
5 TopLevelDocumentMetaclass, get_document
6 )
7 from fields import (ReferenceField, ListField, DictField, MapField)
8 from connection import get_db
9 from queryset import QuerySet
10 from document import Document, EmbeddedDocument
11
12
13 class DeReference(object):
14
15 def __call__(self, items, max_depth=1, instance=None, name=None):
16 """
17 Cheaply dereferences the items to a set depth.
18 Also handles the conversion of complex data types.
19
20 :param items: The iterable (dict, list, queryset) to be dereferenced.
21 :param max_depth: The maximum depth to recurse to
22 :param instance: The owning instance used for tracking changes by
23 :class:`~mongoengine.base.ComplexBaseField`
24 :param name: The name of the field, used for tracking changes by
25 :class:`~mongoengine.base.ComplexBaseField`
26 :param get: A boolean determining if being called by __get__
27 """
28 if items is None or isinstance(items, basestring):
29 return items
30
31 # cheapest way to convert a queryset to a list
32 # list(queryset) uses a count() query to determine length
33 if isinstance(items, QuerySet):
34 items = [i for i in items]
35
36 self.max_depth = max_depth
37 doc_type = None
38
39 if instance and isinstance(instance, (Document, EmbeddedDocument,
40 TopLevelDocumentMetaclass)):
41 doc_type = instance._fields.get(name)
42 while hasattr(doc_type, 'field'):
43 doc_type = doc_type.field
44
45 if isinstance(doc_type, ReferenceField):
46 field = doc_type
47 doc_type = doc_type.document_type
48 is_list = not hasattr(items, 'items')
49
50 if is_list and all([i.__class__ == doc_type for i in items]):
51 return items
52 elif not is_list and all([i.__class__ == doc_type
53 for i in items.values()]):
54 return items
55 elif not field.dbref:
56 if not hasattr(items, 'items'):
57
58 def _get_items(items):
59 new_items = []
60 for v in items:
61 if isinstance(v, list):
62 new_items.append(_get_items(v))
63 elif not isinstance(v, (DBRef, Document)):
64 new_items.append(field.to_python(v))
65 else:
66 new_items.append(v)
67 return new_items
68
69 items = _get_items(items)
70 else:
71 items = dict([
72 (k, field.to_python(v))
73 if not isinstance(v, (DBRef, Document)) else (k, v)
74 for k, v in items.iteritems()]
75 )
76
77 self.reference_map = self._find_references(items)
78 self.object_map = self._fetch_objects(doc_type=doc_type)
79 return self._attach_objects(items, 0, instance, name)
80
81 def _find_references(self, items, depth=0):
82 """
83 Recursively finds all db references to be dereferenced
84
85 :param items: The iterable (dict, list, queryset)
86 :param depth: The current depth of recursion
87 """
88 reference_map = {}
89 if not items or depth >= self.max_depth:
90 return reference_map
91
92 # Determine the iterator to use
93 if not hasattr(items, 'items'):
94 iterator = enumerate(items)
95 else:
96 iterator = items.iteritems()
97
98 # Recursively find dbreferences
99 depth += 1
100 for k, item in iterator:
101 if isinstance(item, (Document, EmbeddedDocument)):
102 for field_name, field in item._fields.iteritems():
103 v = item._data.get(field_name, None)
104 if isinstance(v, (DBRef)):
105 reference_map.setdefault(field.document_type, set()).add(v.id)
106 elif isinstance(v, (dict, SON)) and '_ref' in v:
107 reference_map.setdefault(get_document(v['_cls']), set()).add(v['_ref'].id)
108 elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
109 field_cls = getattr(getattr(field, 'field', None), 'document_type', None)
110 references = self._find_references(v, depth)
111 for key, refs in references.iteritems():
112 if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)):
113 key = field_cls
114 reference_map.setdefault(key, set()).update(refs)
115 elif isinstance(item, (DBRef)):
116 reference_map.setdefault(item.collection, set()).add(item.id)
117 elif isinstance(item, (dict, SON)) and '_ref' in item:
118 reference_map.setdefault(get_document(item['_cls']), set()).add(item['_ref'].id)
119 elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:
120 references = self._find_references(item, depth - 1)
121 for key, refs in references.iteritems():
122 reference_map.setdefault(key, set()).update(refs)
123
124 return reference_map
125
126 def _fetch_objects(self, doc_type=None):
127 """Fetch all references and convert to their document objects
128 """
129 object_map = {}
130 for collection, dbrefs in self.reference_map.iteritems():
131 refs = [dbref for dbref in dbrefs
132 if unicode(dbref).encode('utf-8') not in object_map]
133 if hasattr(collection, 'objects'): # We have a document class for the refs
134 references = collection.objects.in_bulk(refs)
135 for key, doc in references.iteritems():
136 object_map[key] = doc
137 else: # Generic reference: use the refs data to convert to document
138 if isinstance(doc_type, (ListField, DictField, MapField,)):
139 continue
140
141 if doc_type:
142 references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})
143 for ref in references:
144 doc = doc_type._from_son(ref)
145 object_map[doc.id] = doc
146 else:
147 references = get_db()[collection].find({'_id': {'$in': refs}})
148 for ref in references:
149 if '_cls' in ref:
150 doc = get_document(ref["_cls"])._from_son(ref)
151 elif doc_type is None:
152 doc = get_document(
153 ''.join(x.capitalize()
154 for x in collection.split('_')))._from_son(ref)
155 else:
156 doc = doc_type._from_son(ref)
157 object_map[doc.id] = doc
158 return object_map
159
160 def _attach_objects(self, items, depth=0, instance=None, name=None):
161 """
162 Recursively finds all db references to be dereferenced
163
164 :param items: The iterable (dict, list, queryset)
165 :param depth: The current depth of recursion
166 :param instance: The owning instance used for tracking changes by
167 :class:`~mongoengine.base.ComplexBaseField`
168 :param name: The name of the field, used for tracking changes by
169 :class:`~mongoengine.base.ComplexBaseField`
170 """
171 if not items:
172 if isinstance(items, (BaseDict, BaseList)):
173 return items
174
175 if instance:
176 if isinstance(items, dict):
177 return BaseDict(items, instance, name)
178 else:
179 return BaseList(items, instance, name)
180
181 if isinstance(items, (dict, SON)):
182 if '_ref' in items:
183 return self.object_map.get(items['_ref'].id, items)
184 elif '_cls' in items:
185 doc = get_document(items['_cls'])._from_son(items)
186 _cls = doc._data.pop('_cls', None)
187 del items['_cls']
188 doc._data = self._attach_objects(doc._data, depth, doc, None)
189 if _cls is not None:
190 doc._data['_cls'] = _cls
191 return doc
192
193 if not hasattr(items, 'items'):
194 is_list = True
195 list_type = BaseList
196 if isinstance(items, EmbeddedDocumentList):
197 list_type = EmbeddedDocumentList
198 as_tuple = isinstance(items, tuple)
199 iterator = enumerate(items)
200 data = []
201 else:
202 is_list = False
203 iterator = items.iteritems()
204 data = {}
205
206 depth += 1
207 for k, v in iterator:
208 if is_list:
209 data.append(v)
210 else:
211 data[k] = v
212
213 if k in self.object_map and not is_list:
214 data[k] = self.object_map[k]
215 elif isinstance(v, (Document, EmbeddedDocument)):
216 for field_name, field in v._fields.iteritems():
217 v = data[k]._data.get(field_name, None)
218 if isinstance(v, (DBRef)):
219 data[k]._data[field_name] = self.object_map.get(v.id, v)
220 elif isinstance(v, (dict, SON)) and '_ref' in v:
221 data[k]._data[field_name] = self.object_map.get(v['_ref'].id, v)
222 elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
223 item_name = "{0}.{1}.{2}".format(name, k, field_name)
224 data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=item_name)
225 elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
226 item_name = '%s.%s' % (name, k) if name else name
227 data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)
228 elif hasattr(v, 'id'):
229 data[k] = self.object_map.get(v.id, v)
230
231 if instance and name:
232 if is_list:
233 return tuple(data) if as_tuple else list_type(data, instance, name)
234 return BaseDict(data, instance, name)
235 depth += 1
236 return data
237
[end of mongoengine/dereference.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mongoengine/dereference.py b/mongoengine/dereference.py
--- a/mongoengine/dereference.py
+++ b/mongoengine/dereference.py
@@ -128,21 +128,25 @@
"""
object_map = {}
for collection, dbrefs in self.reference_map.iteritems():
- refs = [dbref for dbref in dbrefs
- if unicode(dbref).encode('utf-8') not in object_map]
if hasattr(collection, 'objects'): # We have a document class for the refs
+ col_name = collection._get_collection_name()
+ refs = [dbref for dbref in dbrefs
+ if (col_name, dbref) not in object_map]
references = collection.objects.in_bulk(refs)
for key, doc in references.iteritems():
- object_map[key] = doc
+ object_map[(col_name, key)] = doc
else: # Generic reference: use the refs data to convert to document
if isinstance(doc_type, (ListField, DictField, MapField,)):
continue
+ refs = [dbref for dbref in dbrefs
+ if (collection, dbref) not in object_map]
+
if doc_type:
references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})
for ref in references:
doc = doc_type._from_son(ref)
- object_map[doc.id] = doc
+ object_map[(collection, doc.id)] = doc
else:
references = get_db()[collection].find({'_id': {'$in': refs}})
for ref in references:
@@ -154,7 +158,7 @@
for x in collection.split('_')))._from_son(ref)
else:
doc = doc_type._from_son(ref)
- object_map[doc.id] = doc
+ object_map[(collection, doc.id)] = doc
return object_map
def _attach_objects(self, items, depth=0, instance=None, name=None):
@@ -180,7 +184,8 @@
if isinstance(items, (dict, SON)):
if '_ref' in items:
- return self.object_map.get(items['_ref'].id, items)
+ return self.object_map.get(
+ (items['_ref'].collection, items['_ref'].id), items)
elif '_cls' in items:
doc = get_document(items['_cls'])._from_son(items)
_cls = doc._data.pop('_cls', None)
@@ -216,9 +221,11 @@
for field_name, field in v._fields.iteritems():
v = data[k]._data.get(field_name, None)
if isinstance(v, (DBRef)):
- data[k]._data[field_name] = self.object_map.get(v.id, v)
+ data[k]._data[field_name] = self.object_map.get(
+ (v.collection, v.id), v)
elif isinstance(v, (dict, SON)) and '_ref' in v:
- data[k]._data[field_name] = self.object_map.get(v['_ref'].id, v)
+ data[k]._data[field_name] = self.object_map.get(
+ (v['_ref'].collection , v['_ref'].id), v)
elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
item_name = "{0}.{1}.{2}".format(name, k, field_name)
data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=item_name)
@@ -226,7 +233,7 @@
item_name = '%s.%s' % (name, k) if name else name
data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)
elif hasattr(v, 'id'):
- data[k] = self.object_map.get(v.id, v)
+ data[k] = self.object_map.get((v.collection, v.id), v)
if instance and name:
if is_list:
| {"golden_diff": "diff --git a/mongoengine/dereference.py b/mongoengine/dereference.py\n--- a/mongoengine/dereference.py\n+++ b/mongoengine/dereference.py\n@@ -128,21 +128,25 @@\n \"\"\"\n object_map = {}\n for collection, dbrefs in self.reference_map.iteritems():\n- refs = [dbref for dbref in dbrefs\n- if unicode(dbref).encode('utf-8') not in object_map]\n if hasattr(collection, 'objects'): # We have a document class for the refs\n+ col_name = collection._get_collection_name()\n+ refs = [dbref for dbref in dbrefs\n+ if (col_name, dbref) not in object_map]\n references = collection.objects.in_bulk(refs)\n for key, doc in references.iteritems():\n- object_map[key] = doc\n+ object_map[(col_name, key)] = doc\n else: # Generic reference: use the refs data to convert to document\n if isinstance(doc_type, (ListField, DictField, MapField,)):\n continue\n \n+ refs = [dbref for dbref in dbrefs\n+ if (collection, dbref) not in object_map]\n+\n if doc_type:\n references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})\n for ref in references:\n doc = doc_type._from_son(ref)\n- object_map[doc.id] = doc\n+ object_map[(collection, doc.id)] = doc\n else:\n references = get_db()[collection].find({'_id': {'$in': refs}})\n for ref in references:\n@@ -154,7 +158,7 @@\n for x in collection.split('_')))._from_son(ref)\n else:\n doc = doc_type._from_son(ref)\n- object_map[doc.id] = doc\n+ object_map[(collection, doc.id)] = doc\n return object_map\n \n def _attach_objects(self, items, depth=0, instance=None, name=None):\n@@ -180,7 +184,8 @@\n \n if isinstance(items, (dict, SON)):\n if '_ref' in items:\n- return self.object_map.get(items['_ref'].id, items)\n+ return self.object_map.get(\n+ (items['_ref'].collection, items['_ref'].id), items)\n elif '_cls' in items:\n doc = get_document(items['_cls'])._from_son(items)\n _cls = doc._data.pop('_cls', None)\n@@ -216,9 +221,11 @@\n for field_name, field in v._fields.iteritems():\n v = data[k]._data.get(field_name, None)\n if isinstance(v, (DBRef)):\n- data[k]._data[field_name] = self.object_map.get(v.id, v)\n+ data[k]._data[field_name] = self.object_map.get(\n+ (v.collection, v.id), v)\n elif isinstance(v, (dict, SON)) and '_ref' in v:\n- data[k]._data[field_name] = self.object_map.get(v['_ref'].id, v)\n+ data[k]._data[field_name] = self.object_map.get(\n+ (v['_ref'].collection , v['_ref'].id), v)\n elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:\n item_name = \"{0}.{1}.{2}\".format(name, k, field_name)\n data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=item_name)\n@@ -226,7 +233,7 @@\n item_name = '%s.%s' % (name, k) if name else name\n data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)\n elif hasattr(v, 'id'):\n- data[k] = self.object_map.get(v.id, v)\n+ data[k] = self.object_map.get((v.collection, v.id), v)\n \n if instance and name:\n if is_list:\n", "issue": "Document reload cause clash between reference with same id but different collection\nHi there,\n\nLet say I have a document with two `ReferenceField` on two different collections with incremental ids.\nGiven the ids are incremental and not uuid, an id can point on a valid document in both collections (pretty obvious for id `1` ^^)\n\nThat being said, the `reload` handle reference reloading in around here: https://github.com/MongoEngine/mongoengine/blob/d4a8ae5743fe335ac4cea957537be34de45a440f/mongoengine/dereference.py#L77\n\nThe idea is (if I'm not wrong) to collect the document's references in a dict something 
like that\n\n``` python\n>>> print(self.reference_map)\n{\n MyDocumentA: [DBref('MyDocumentA', 'id-1'), DBref('MyDocumentA', 'id-2') ],\n MyDocumentB: [DBref('MyDocumentB', 'id-1'),]\n}\n```\n\nThen use this dict to retrieve all those document from database\n\n``` python\n>>> print(self.object_map)\n{\n 'id-1': <MyDocumentB: MyDocumentB object>,\n 'id-2': <MyDocumentA: MyDocumentA object>\n}\n```\n\nAs you can see, given object_map only use id as key, the document `MyDocumentA` with key 'id-1' has been shadowed by the `MyDocumentB` with similar id !\n\nThus, the final `_attach_objects` aiming at refreshing the main document's references will replace a `MyDocumentA` reference with a `MyDocumentB`...\n\nI think to solve this trouble, we should replace the key in object_map from this single to a `DBref`, thus the check would be on both collection and id.\n\nWhat do you think ? Given I've spend some time tracking this nasty bug, I think I can provide a fix myself if you ask for ;-)\n\n", "before_files": [{"content": "from bson import DBRef, SON\n\nfrom base import (\n BaseDict, BaseList, EmbeddedDocumentList,\n TopLevelDocumentMetaclass, get_document\n)\nfrom fields import (ReferenceField, ListField, DictField, MapField)\nfrom connection import get_db\nfrom queryset import QuerySet\nfrom document import Document, EmbeddedDocument\n\n\nclass DeReference(object):\n\n def __call__(self, items, max_depth=1, instance=None, name=None):\n \"\"\"\n Cheaply dereferences the items to a set depth.\n Also handles the conversion of complex data types.\n\n :param items: The iterable (dict, list, queryset) to be dereferenced.\n :param max_depth: The maximum depth to recurse to\n :param instance: The owning instance used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n :param name: The name of the field, used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n :param get: A boolean determining if being called by __get__\n \"\"\"\n if items is None or isinstance(items, basestring):\n return items\n\n # cheapest way to convert a queryset to a list\n # list(queryset) uses a count() query to determine length\n if isinstance(items, QuerySet):\n items = [i for i in items]\n\n self.max_depth = max_depth\n doc_type = None\n\n if instance and isinstance(instance, (Document, EmbeddedDocument,\n TopLevelDocumentMetaclass)):\n doc_type = instance._fields.get(name)\n while hasattr(doc_type, 'field'):\n doc_type = doc_type.field\n\n if isinstance(doc_type, ReferenceField):\n field = doc_type\n doc_type = doc_type.document_type\n is_list = not hasattr(items, 'items')\n\n if is_list and all([i.__class__ == doc_type for i in items]):\n return items\n elif not is_list and all([i.__class__ == doc_type\n for i in items.values()]):\n return items\n elif not field.dbref:\n if not hasattr(items, 'items'):\n\n def _get_items(items):\n new_items = []\n for v in items:\n if isinstance(v, list):\n new_items.append(_get_items(v))\n elif not isinstance(v, (DBRef, Document)):\n new_items.append(field.to_python(v))\n else:\n new_items.append(v)\n return new_items\n\n items = _get_items(items)\n else:\n items = dict([\n (k, field.to_python(v))\n if not isinstance(v, (DBRef, Document)) else (k, v)\n for k, v in items.iteritems()]\n )\n\n self.reference_map = self._find_references(items)\n self.object_map = self._fetch_objects(doc_type=doc_type)\n return self._attach_objects(items, 0, instance, name)\n\n def _find_references(self, items, depth=0):\n \"\"\"\n Recursively finds all db references to be 
dereferenced\n\n :param items: The iterable (dict, list, queryset)\n :param depth: The current depth of recursion\n \"\"\"\n reference_map = {}\n if not items or depth >= self.max_depth:\n return reference_map\n\n # Determine the iterator to use\n if not hasattr(items, 'items'):\n iterator = enumerate(items)\n else:\n iterator = items.iteritems()\n\n # Recursively find dbreferences\n depth += 1\n for k, item in iterator:\n if isinstance(item, (Document, EmbeddedDocument)):\n for field_name, field in item._fields.iteritems():\n v = item._data.get(field_name, None)\n if isinstance(v, (DBRef)):\n reference_map.setdefault(field.document_type, set()).add(v.id)\n elif isinstance(v, (dict, SON)) and '_ref' in v:\n reference_map.setdefault(get_document(v['_cls']), set()).add(v['_ref'].id)\n elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:\n field_cls = getattr(getattr(field, 'field', None), 'document_type', None)\n references = self._find_references(v, depth)\n for key, refs in references.iteritems():\n if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)):\n key = field_cls\n reference_map.setdefault(key, set()).update(refs)\n elif isinstance(item, (DBRef)):\n reference_map.setdefault(item.collection, set()).add(item.id)\n elif isinstance(item, (dict, SON)) and '_ref' in item:\n reference_map.setdefault(get_document(item['_cls']), set()).add(item['_ref'].id)\n elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:\n references = self._find_references(item, depth - 1)\n for key, refs in references.iteritems():\n reference_map.setdefault(key, set()).update(refs)\n\n return reference_map\n\n def _fetch_objects(self, doc_type=None):\n \"\"\"Fetch all references and convert to their document objects\n \"\"\"\n object_map = {}\n for collection, dbrefs in self.reference_map.iteritems():\n refs = [dbref for dbref in dbrefs\n if unicode(dbref).encode('utf-8') not in object_map]\n if hasattr(collection, 'objects'): # We have a document class for the refs\n references = collection.objects.in_bulk(refs)\n for key, doc in references.iteritems():\n object_map[key] = doc\n else: # Generic reference: use the refs data to convert to document\n if isinstance(doc_type, (ListField, DictField, MapField,)):\n continue\n\n if doc_type:\n references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})\n for ref in references:\n doc = doc_type._from_son(ref)\n object_map[doc.id] = doc\n else:\n references = get_db()[collection].find({'_id': {'$in': refs}})\n for ref in references:\n if '_cls' in ref:\n doc = get_document(ref[\"_cls\"])._from_son(ref)\n elif doc_type is None:\n doc = get_document(\n ''.join(x.capitalize()\n for x in collection.split('_')))._from_son(ref)\n else:\n doc = doc_type._from_son(ref)\n object_map[doc.id] = doc\n return object_map\n\n def _attach_objects(self, items, depth=0, instance=None, name=None):\n \"\"\"\n Recursively finds all db references to be dereferenced\n\n :param items: The iterable (dict, list, queryset)\n :param depth: The current depth of recursion\n :param instance: The owning instance used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n :param name: The name of the field, used for tracking changes by\n :class:`~mongoengine.base.ComplexBaseField`\n \"\"\"\n if not items:\n if isinstance(items, (BaseDict, BaseList)):\n return items\n\n if instance:\n if isinstance(items, dict):\n return BaseDict(items, instance, name)\n else:\n return BaseList(items, instance, name)\n\n if isinstance(items, 
(dict, SON)):\n if '_ref' in items:\n return self.object_map.get(items['_ref'].id, items)\n elif '_cls' in items:\n doc = get_document(items['_cls'])._from_son(items)\n _cls = doc._data.pop('_cls', None)\n del items['_cls']\n doc._data = self._attach_objects(doc._data, depth, doc, None)\n if _cls is not None:\n doc._data['_cls'] = _cls\n return doc\n\n if not hasattr(items, 'items'):\n is_list = True\n list_type = BaseList\n if isinstance(items, EmbeddedDocumentList):\n list_type = EmbeddedDocumentList\n as_tuple = isinstance(items, tuple)\n iterator = enumerate(items)\n data = []\n else:\n is_list = False\n iterator = items.iteritems()\n data = {}\n\n depth += 1\n for k, v in iterator:\n if is_list:\n data.append(v)\n else:\n data[k] = v\n\n if k in self.object_map and not is_list:\n data[k] = self.object_map[k]\n elif isinstance(v, (Document, EmbeddedDocument)):\n for field_name, field in v._fields.iteritems():\n v = data[k]._data.get(field_name, None)\n if isinstance(v, (DBRef)):\n data[k]._data[field_name] = self.object_map.get(v.id, v)\n elif isinstance(v, (dict, SON)) and '_ref' in v:\n data[k]._data[field_name] = self.object_map.get(v['_ref'].id, v)\n elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:\n item_name = \"{0}.{1}.{2}\".format(name, k, field_name)\n data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=item_name)\n elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:\n item_name = '%s.%s' % (name, k) if name else name\n data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)\n elif hasattr(v, 'id'):\n data[k] = self.object_map.get(v.id, v)\n\n if instance and name:\n if is_list:\n return tuple(data) if as_tuple else list_type(data, instance, name)\n return BaseDict(data, instance, name)\n depth += 1\n return data\n", "path": "mongoengine/dereference.py"}]} | 3,676 | 908 |
gh_patches_debug_5448 | rasdani/github-patches | git_diff | kserve__kserve-704 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
storage-initialiser fails to copy model from S3
Upon using a Secret and Service account that look like this:
```
apiVersion: v1
metadata:
name: kfserving-aws-secret
namespace: {namespace}
annotations:
serving.kubeflow.org/s3-endpoint: s3.eu-east-1.amazonaws.com
serving.kubeflow.org/s3-usehttps: "1"
serving.kubeflow.org/s3-verifyssl: "1"
serving.kubeflow.org/s3-region: eu-east-1
data:
AWS_ACCESS_KEY_ID: {access_key_base64}
AWS_SECRET_ACCESS_KEY: {secret_key_base64}
kind: Secret
type: Opaque
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kfserving-sa
namespace: {namespace}
secrets:
- name: kfserving-aws-secret
```
...and an InferenceService like this:
```
apiVersion: "serving.kubeflow.org/v1alpha2"
kind: "InferenceService"
metadata:
name: {model_name}
namespace: {namespace}
spec:
default:
predictor:
serviceAccountName: kfserving-sa
tensorflow:
storageUri: {model_base_path}
```
...the model fails to serve, with the `storage-initialiser` pod throwing the following error:
```
[I 200226 14:09:42 initializer-entrypoint:13] Initializing, args: src_uri [s3://at-kubeflow-training/tf-job/ksc-ebce76a1] dest_path[ [/mnt/models]
[I 200226 14:09:42 storage:35] Copying contents of s3://at-kubeflow-training/tf-job/ksc-ebce76a1 to local
Traceback (most recent call last):
File "/storage-initializer/scripts/initializer-entrypoint", line 14, in <module>
kfserving.Storage.download(src_uri, dest_path)
File "/usr/local/lib/python3.7/site-packages/kfserving/storage.py", line 50, in download
Storage._download_s3(uri, out_dir)
File "/usr/local/lib/python3.7/site-packages/kfserving/storage.py", line 65, in _download_s3
client = Storage._create_minio_client()
File "/usr/local/lib/python3.7/site-packages/kfserving/storage.py", line 217, in _create_minio_client
secure=use_ssl)
File "/usr/local/lib/python3.7/site-packages/minio/api.py", line 150, in __init__
is_valid_endpoint(endpoint)
File "/usr/local/lib/python3.7/site-packages/minio/helpers.py", line 301, in is_valid_endpoint
if hostname[-1] == '.':
IndexError: string index out of range
```
This was using Kubeflow 1.0.0RC4, KFServing 0.2.2 and KNative 0.11.1
</issue>
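The final frame of that traceback can only fail this way when the endpoint string handed to the MinIO client is empty. A minimal sketch of the check that raises, where the empty value is an inference from the log rather than something stated in the report:

```python
# Minimal sketch of the failing check shown in the traceback above.
# Assumption: the endpoint ends up empty when the expected S3 settings
# are not resolved from the secret, so the hostname is an empty string.
hostname = ""
if hostname[-1] == '.':   # minio/helpers.py line 301 in the log
    pass                  # never reached: indexing an empty string raises IndexError
```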
<code>
[start of python/kfserving/kfserving/constants/constants.py]
1 # Copyright 2020 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 # KFServing K8S constants
18 KFSERVING_GROUP = 'serving.kubeflow.org'
19 KFSERVING_KIND = 'InferenceService'
20 KFSERVING_PLURAL = 'inferenceservices'
21 KFSERVING_VERSION = os.environ.get('KFSERVING_VERSION', 'v1alpha2')
22 KFSERVING_API_VERSION = KFSERVING_GROUP + '/' + KFSERVING_VERSION
23
24 KFSERVING_LOGLEVEL = os.environ.get('KFSERVING_LOGLEVEL', 'INFO').upper()
25
26 # INFERENCESERVICE credentials common constants
27 INFERENCESERVICE_CONFIG_MAP_NAME = 'inferenceservice-config'
28 INFERENCESERVICE_SYSTEM_NAMESPACE = 'kfserving-system'
29 DEFAULT_SECRET_NAME = "kfserving-secret-"
30 DEFAULT_SA_NAME = "kfserving-service-credentials"
31
32 # S3 credentials constants
33 S3_ACCESS_KEY_ID_DEFAULT_NAME = "awsAccessKeyID"
34 S3_SECRET_ACCESS_KEY_DEFAULT_NAME = "awsSecretAccessKey"
35 S3_DEFAULT_CREDS_FILE = '~/.aws/credentials'
36
37 # GCS credentials constants
38 GCS_CREDS_FILE_DEFAULT_NAME = 'gcloud-application-credentials.json'
39 GCS_DEFAULT_CREDS_FILE = '~/.config/gcloud/application_default_credentials.json'
40
41 # Azure credentials constants
42 AZ_DEFAULT_CREDS_FILE = '~/.azure/azure_credentials.json'
43
[end of python/kfserving/kfserving/constants/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/kfserving/kfserving/constants/constants.py b/python/kfserving/kfserving/constants/constants.py
--- a/python/kfserving/kfserving/constants/constants.py
+++ b/python/kfserving/kfserving/constants/constants.py
@@ -30,8 +30,8 @@
DEFAULT_SA_NAME = "kfserving-service-credentials"
# S3 credentials constants
-S3_ACCESS_KEY_ID_DEFAULT_NAME = "awsAccessKeyID"
-S3_SECRET_ACCESS_KEY_DEFAULT_NAME = "awsSecretAccessKey"
+S3_ACCESS_KEY_ID_DEFAULT_NAME = "AWS_ACCESS_KEY_ID"
+S3_SECRET_ACCESS_KEY_DEFAULT_NAME = "AWS_SECRET_ACCESS_KEY"
S3_DEFAULT_CREDS_FILE = '~/.aws/credentials'
# GCS credentials constants
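The patch only renames the two S3 constants so that they match the data keys of a credentials secret like the one in the report. A small illustrative check, not kfserving's actual lookup code, of why the old names could never be found:

```python
# Illustrative only: compares key names, it is not kfserving's credential lookup.
secret_data = {"AWS_ACCESS_KEY_ID": "...", "AWS_SECRET_ACCESS_KEY": "..."}  # keys as in the report

old_keys = ("awsAccessKeyID", "awsSecretAccessKey")          # constants before the patch
new_keys = ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY")    # constants after the patch

print(all(k in secret_data for k in old_keys))   # False -> credentials silently missing
print(all(k in secret_data for k in new_keys))   # True
```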
| {"golden_diff": "diff --git a/python/kfserving/kfserving/constants/constants.py b/python/kfserving/kfserving/constants/constants.py\n--- a/python/kfserving/kfserving/constants/constants.py\n+++ b/python/kfserving/kfserving/constants/constants.py\n@@ -30,8 +30,8 @@\n DEFAULT_SA_NAME = \"kfserving-service-credentials\"\n \n # S3 credentials constants\n-S3_ACCESS_KEY_ID_DEFAULT_NAME = \"awsAccessKeyID\"\n-S3_SECRET_ACCESS_KEY_DEFAULT_NAME = \"awsSecretAccessKey\"\n+S3_ACCESS_KEY_ID_DEFAULT_NAME = \"AWS_ACCESS_KEY_ID\"\n+S3_SECRET_ACCESS_KEY_DEFAULT_NAME = \"AWS_SECRET_ACCESS_KEY\"\n S3_DEFAULT_CREDS_FILE = '~/.aws/credentials'\n \n # GCS credentials constants\n", "issue": "storage-initialiser fails to copy model from S3\nUpon using a Secret and Service account that look like this:\r\n\r\n```\r\napiVersion: v1\r\nmetadata:\r\n name: kfserving-aws-secret\r\n namespace: {namespace}\r\n annotations:\r\n serving.kubeflow.org/s3-endpoint: s3.eu-east-1.amazonaws.com\r\n serving.kubeflow.org/s3-usehttps: \"1\"\r\n serving.kubeflow.org/s3-verifyssl: \"1\"\r\n serving.kubeflow.org/s3-region: eu-east-1\r\ndata:\r\n AWS_ACCESS_KEY_ID: {access_key_base64}\r\n AWS_SECRET_ACCESS_KEY: {secret_key_base64}\r\nkind: Secret\r\ntype: Opaque\r\n---\r\napiVersion: v1\r\nkind: ServiceAccount\r\nmetadata:\r\n name: kfserving-sa\r\n namespace: {namespace}\r\nsecrets:\r\n - name: kfserving-aws-secret \r\n```\r\n...and an InferenceService like this:\r\n\r\n```\r\napiVersion: \"serving.kubeflow.org/v1alpha2\"\r\nkind: \"InferenceService\"\r\nmetadata:\r\n name: {model_name}\r\n namespace: {namespace}\r\nspec:\r\n default:\r\n predictor:\r\n serviceAccountName: kfserving-sa\r\n tensorflow:\r\n storageUri: {model_base_path} \r\n```\r\n\r\n...the model fails to serve, with the `storage-initialiser` pod throwing the following error:\r\n\r\n```\r\n[I 200226 14:09:42 initializer-entrypoint:13] Initializing, args: src_uri [s3://at-kubeflow-training/tf-job/ksc-ebce76a1] dest_path[ [/mnt/models]\r\n[I 200226 14:09:42 storage:35] Copying contents of s3://at-kubeflow-training/tf-job/ksc-ebce76a1 to local\r\nTraceback (most recent call last):\r\n File \"/storage-initializer/scripts/initializer-entrypoint\", line 14, in <module>\r\n kfserving.Storage.download(src_uri, dest_path)\r\n File \"/usr/local/lib/python3.7/site-packages/kfserving/storage.py\", line 50, in download\r\n Storage._download_s3(uri, out_dir)\r\n File \"/usr/local/lib/python3.7/site-packages/kfserving/storage.py\", line 65, in _download_s3\r\n client = Storage._create_minio_client()\r\n File \"/usr/local/lib/python3.7/site-packages/kfserving/storage.py\", line 217, in _create_minio_client\r\n secure=use_ssl)\r\n File \"/usr/local/lib/python3.7/site-packages/minio/api.py\", line 150, in __init__\r\n is_valid_endpoint(endpoint)\r\n File \"/usr/local/lib/python3.7/site-packages/minio/helpers.py\", line 301, in is_valid_endpoint\r\n if hostname[-1] == '.':\r\nIndexError: string index out of range\r\n```\r\n\r\nThis was using Kubeflow 1.0.0RC4, KFServing 0.2.2 and KNative 0.11.1\n", "before_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\n# KFServing K8S constants\nKFSERVING_GROUP = 'serving.kubeflow.org'\nKFSERVING_KIND = 'InferenceService'\nKFSERVING_PLURAL = 'inferenceservices'\nKFSERVING_VERSION = os.environ.get('KFSERVING_VERSION', 'v1alpha2')\nKFSERVING_API_VERSION = KFSERVING_GROUP + '/' + KFSERVING_VERSION\n\nKFSERVING_LOGLEVEL = os.environ.get('KFSERVING_LOGLEVEL', 'INFO').upper()\n\n# INFERENCESERVICE credentials common constants\nINFERENCESERVICE_CONFIG_MAP_NAME = 'inferenceservice-config'\nINFERENCESERVICE_SYSTEM_NAMESPACE = 'kfserving-system'\nDEFAULT_SECRET_NAME = \"kfserving-secret-\"\nDEFAULT_SA_NAME = \"kfserving-service-credentials\"\n\n# S3 credentials constants\nS3_ACCESS_KEY_ID_DEFAULT_NAME = \"awsAccessKeyID\"\nS3_SECRET_ACCESS_KEY_DEFAULT_NAME = \"awsSecretAccessKey\"\nS3_DEFAULT_CREDS_FILE = '~/.aws/credentials'\n\n# GCS credentials constants\nGCS_CREDS_FILE_DEFAULT_NAME = 'gcloud-application-credentials.json'\nGCS_DEFAULT_CREDS_FILE = '~/.config/gcloud/application_default_credentials.json'\n\n# Azure credentials constants\nAZ_DEFAULT_CREDS_FILE = '~/.azure/azure_credentials.json'\n", "path": "python/kfserving/kfserving/constants/constants.py"}]} | 1,729 | 159 |
gh_patches_debug_37666 | rasdani/github-patches | git_diff | certbot__certbot-7837 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature request: Add option to know what is the last domain passed to dns hook script
Hi,
certbot doesn't provide an official hook for the API of my DNS provider, so I'm using my own script. Every time a record is created or modified at the DNS provider it has to propagate to all the authoritative DNS servers, which in my case takes about 1 minute and 30 seconds. As far as I know there is currently no way for the script to tell which of the domains being issued is the last one, so to be bullet proof I wait out that propagation after every _acme-challenge TXT record the script creates (I actually poll the API until propagation finishes, but as I said, that still takes about 1:30). For a certificate with 100 domains that adds up to roughly 2 hours and 30 minutes. If I knew which domain was the last one, I could write all the TXT records first and run the propagation check (or a simple sleep...) only once, after the last record is created, so the whole request would take about 1:30 instead of 2 hours and 30 minutes.

It would be great if certbot provided a variable like `CERTBOT_LAST_DOMAIN=true` (or something similar) so the script would know when there are no more domains to add at the DNS provider and could then sleep for the right amount of time, or run the needed checks, to let the DNS propagation finish.
As a workaround I pass the last domain to my script:
`./certbot-auto certonly --manual --preferred-challenges=dns --manual-auth-hook "/path/to/hook/dnsapi.sh yyy.domain.tld" --manual-cleanup-hook /path/to/hook/dnsapi-cleanup.sh -d zzz.domain.tld,aaa.domain.tld,yyy.domain.tld`
and it works perfectly, but it would be great to have this feature added directly to certbot.
Thank you in advance.
Cheers,
sahsanu
</issue>
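A rough sketch of the workaround described in the report, assuming the auth hook is an executable Python script; the DNS call is a stub standing in for the provider's API:

```python
#!/usr/bin/env python3
# Sketch of the manual-auth-hook workaround: the last domain is passed as an
# argument, so the propagation wait happens only once per certificate.
import os
import sys
import time

def create_txt_record(name, value):
    """Stub -- replace with the DNS provider's API call."""

domain = os.environ["CERTBOT_DOMAIN"]            # set by certbot for manual hooks
validation = os.environ["CERTBOT_VALIDATION"]
last_domain = sys.argv[1]                        # e.g. yyy.domain.tld from the command above

create_txt_record(f"_acme-challenge.{domain}", validation)

if domain == last_domain:
    time.sleep(90)   # or poll the provider's API until propagation has finished
```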
<code>
[start of certbot/certbot/_internal/plugins/manual.py]
1 """Manual authenticator plugin"""
2 import zope.component
3 import zope.interface
4
5 from acme import challenges
6 from acme.magic_typing import Dict
7 from certbot import achallenges # pylint: disable=unused-import
8 from certbot import errors
9 from certbot import interfaces
10 from certbot import reverter
11 from certbot._internal import hooks
12 from certbot.compat import os
13 from certbot.plugins import common
14
15
16 @zope.interface.implementer(interfaces.IAuthenticator)
17 @zope.interface.provider(interfaces.IPluginFactory)
18 class Authenticator(common.Plugin):
19 """Manual authenticator
20
21 This plugin allows the user to perform the domain validation
22 challenge(s) themselves. This either be done manually by the user or
23 through shell scripts provided to Certbot.
24
25 """
26
27 description = 'Manual configuration or run your own shell scripts'
28 hidden = True
29 long_description = (
30 'Authenticate through manual configuration or custom shell scripts. '
31 'When using shell scripts, an authenticator script must be provided. '
32 'The environment variables available to this script depend on the '
33 'type of challenge. $CERTBOT_DOMAIN will always contain the domain '
34 'being authenticated. For HTTP-01 and DNS-01, $CERTBOT_VALIDATION '
35 'is the validation string, and $CERTBOT_TOKEN is the filename of the '
36 'resource requested when performing an HTTP-01 challenge. An additional '
37 'cleanup script can also be provided and can use the additional variable '
38 '$CERTBOT_AUTH_OUTPUT which contains the stdout output from the auth script.')
39 _DNS_INSTRUCTIONS = """\
40 Please deploy a DNS TXT record under the name
41 {domain} with the following value:
42
43 {validation}
44
45 Before continuing, verify the record is deployed."""
46 _HTTP_INSTRUCTIONS = """\
47 Create a file containing just this data:
48
49 {validation}
50
51 And make it available on your web server at this URL:
52
53 {uri}
54 """
55 _SUBSEQUENT_CHALLENGE_INSTRUCTIONS = """
56 (This must be set up in addition to the previous challenges; do not remove,
57 replace, or undo the previous challenge tasks yet.)
58 """
59 _SUBSEQUENT_DNS_CHALLENGE_INSTRUCTIONS = """
60 (This must be set up in addition to the previous challenges; do not remove,
61 replace, or undo the previous challenge tasks yet. Note that you might be
62 asked to create multiple distinct TXT records with the same name. This is
63 permitted by DNS standards.)
64 """
65
66 def __init__(self, *args, **kwargs):
67 super(Authenticator, self).__init__(*args, **kwargs)
68 self.reverter = reverter.Reverter(self.config)
69 self.reverter.recovery_routine()
70 self.env = dict() \
71 # type: Dict[achallenges.KeyAuthorizationAnnotatedChallenge, Dict[str, str]]
72 self.subsequent_dns_challenge = False
73 self.subsequent_any_challenge = False
74
75 @classmethod
76 def add_parser_arguments(cls, add):
77 add('auth-hook',
78 help='Path or command to execute for the authentication script')
79 add('cleanup-hook',
80 help='Path or command to execute for the cleanup script')
81 add('public-ip-logging-ok', action='store_true',
82 help='Automatically allows public IP logging (default: Ask)')
83
84 def prepare(self): # pylint: disable=missing-function-docstring
85 if self.config.noninteractive_mode and not self.conf('auth-hook'):
86 raise errors.PluginError(
87 'An authentication script must be provided with --{0} when '
88 'using the manual plugin non-interactively.'.format(
89 self.option_name('auth-hook')))
90 self._validate_hooks()
91
92 def _validate_hooks(self):
93 if self.config.validate_hooks:
94 for name in ('auth-hook', 'cleanup-hook'):
95 hook = self.conf(name)
96 if hook is not None:
97 hook_prefix = self.option_name(name)[:-len('-hook')]
98 hooks.validate_hook(hook, hook_prefix)
99
100 def more_info(self): # pylint: disable=missing-function-docstring
101 return (
102 'This plugin allows the user to customize setup for domain '
103 'validation challenges either through shell scripts provided by '
104 'the user or by performing the setup manually.')
105
106 def get_chall_pref(self, domain):
107 # pylint: disable=unused-argument,missing-function-docstring
108 return [challenges.HTTP01, challenges.DNS01]
109
110 def perform(self, achalls): # pylint: disable=missing-function-docstring
111 self._verify_ip_logging_ok()
112 if self.conf('auth-hook'):
113 perform_achall = self._perform_achall_with_script
114 else:
115 perform_achall = self._perform_achall_manually
116
117 responses = []
118 for achall in achalls:
119 perform_achall(achall)
120 responses.append(achall.response(achall.account_key))
121 return responses
122
123 def _verify_ip_logging_ok(self):
124 if not self.conf('public-ip-logging-ok'):
125 cli_flag = '--{0}'.format(self.option_name('public-ip-logging-ok'))
126 msg = ('NOTE: The IP of this machine will be publicly logged as '
127 "having requested this certificate. If you're running "
128 'certbot in manual mode on a machine that is not your '
129 "server, please ensure you're okay with that.\n\n"
130 'Are you OK with your IP being logged?')
131 display = zope.component.getUtility(interfaces.IDisplay)
132 if display.yesno(msg, cli_flag=cli_flag, force_interactive=True):
133 setattr(self.config, self.dest('public-ip-logging-ok'), True)
134 else:
135 raise errors.PluginError('Must agree to IP logging to proceed')
136
137 def _perform_achall_with_script(self, achall):
138 env = dict(CERTBOT_DOMAIN=achall.domain,
139 CERTBOT_VALIDATION=achall.validation(achall.account_key))
140 if isinstance(achall.chall, challenges.HTTP01):
141 env['CERTBOT_TOKEN'] = achall.chall.encode('token')
142 else:
143 os.environ.pop('CERTBOT_TOKEN', None)
144 os.environ.update(env)
145 _, out = self._execute_hook('auth-hook')
146 env['CERTBOT_AUTH_OUTPUT'] = out.strip()
147 self.env[achall] = env
148
149 def _perform_achall_manually(self, achall):
150 validation = achall.validation(achall.account_key)
151 if isinstance(achall.chall, challenges.HTTP01):
152 msg = self._HTTP_INSTRUCTIONS.format(
153 achall=achall, encoded_token=achall.chall.encode('token'),
154 port=self.config.http01_port,
155 uri=achall.chall.uri(achall.domain), validation=validation)
156 else:
157 assert isinstance(achall.chall, challenges.DNS01)
158 msg = self._DNS_INSTRUCTIONS.format(
159 domain=achall.validation_domain_name(achall.domain),
160 validation=validation)
161 if isinstance(achall.chall, challenges.DNS01):
162 if self.subsequent_dns_challenge:
163 # 2nd or later dns-01 challenge
164 msg += self._SUBSEQUENT_DNS_CHALLENGE_INSTRUCTIONS
165 self.subsequent_dns_challenge = True
166 elif self.subsequent_any_challenge:
167 # 2nd or later challenge of another type
168 msg += self._SUBSEQUENT_CHALLENGE_INSTRUCTIONS
169 display = zope.component.getUtility(interfaces.IDisplay)
170 display.notification(msg, wrap=False, force_interactive=True)
171 self.subsequent_any_challenge = True
172
173 def cleanup(self, achalls): # pylint: disable=missing-function-docstring
174 if self.conf('cleanup-hook'):
175 for achall in achalls:
176 env = self.env.pop(achall)
177 if 'CERTBOT_TOKEN' not in env:
178 os.environ.pop('CERTBOT_TOKEN', None)
179 os.environ.update(env)
180 self._execute_hook('cleanup-hook')
181 self.reverter.recovery_routine()
182
183 def _execute_hook(self, hook_name):
184 return hooks.execute(self.option_name(hook_name), self.conf(hook_name))
185
[end of certbot/certbot/_internal/plugins/manual.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/certbot/certbot/_internal/plugins/manual.py b/certbot/certbot/_internal/plugins/manual.py
--- a/certbot/certbot/_internal/plugins/manual.py
+++ b/certbot/certbot/_internal/plugins/manual.py
@@ -35,7 +35,11 @@
'is the validation string, and $CERTBOT_TOKEN is the filename of the '
'resource requested when performing an HTTP-01 challenge. An additional '
'cleanup script can also be provided and can use the additional variable '
- '$CERTBOT_AUTH_OUTPUT which contains the stdout output from the auth script.')
+ '$CERTBOT_AUTH_OUTPUT which contains the stdout output from the auth script.'
+ 'For both authenticator and cleanup script, on HTTP-01 and DNS-01 challenges,'
+ '$CERTBOT_REMAINING_CHALLENGES will be equal to the number of challenges that '
+ 'remain after the current one, and $CERTBOT_ALL_DOMAINS contains a comma-separated '
+ 'list of all domains that are challenged for the current certificate.')
_DNS_INSTRUCTIONS = """\
Please deploy a DNS TXT record under the name
{domain} with the following value:
@@ -109,14 +113,13 @@
def perform(self, achalls): # pylint: disable=missing-function-docstring
self._verify_ip_logging_ok()
- if self.conf('auth-hook'):
- perform_achall = self._perform_achall_with_script
- else:
- perform_achall = self._perform_achall_manually
responses = []
for achall in achalls:
- perform_achall(achall)
+ if self.conf('auth-hook'):
+ self._perform_achall_with_script(achall, achalls)
+ else:
+ self._perform_achall_manually(achall)
responses.append(achall.response(achall.account_key))
return responses
@@ -134,9 +137,11 @@
else:
raise errors.PluginError('Must agree to IP logging to proceed')
- def _perform_achall_with_script(self, achall):
+ def _perform_achall_with_script(self, achall, achalls):
env = dict(CERTBOT_DOMAIN=achall.domain,
- CERTBOT_VALIDATION=achall.validation(achall.account_key))
+ CERTBOT_VALIDATION=achall.validation(achall.account_key),
+ CERTBOT_ALL_DOMAINS=','.join(one_achall.domain for one_achall in achalls),
+ CERTBOT_REMAINING_CHALLENGES=str(len(achalls) - achalls.index(achall) - 1))
if isinstance(achall.chall, challenges.HTTP01):
env['CERTBOT_TOKEN'] = achall.chall.encode('token')
else:
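With the two variables this patch adds, a hook no longer needs the last domain passed in by hand. A short sketch of how an auth hook could defer the propagation wait; the 90-second figure comes from the report:

```python
# Sketch only: how an auth hook could use the new environment variables.
import os
import time

if int(os.environ["CERTBOT_REMAINING_CHALLENGES"]) == 0:   # last challenge for this cert
    time.sleep(90)                                          # or poll the DNS provider
print(os.environ["CERTBOT_ALL_DOMAINS"].split(","))         # every domain on the certificate
```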
| {"golden_diff": "diff --git a/certbot/certbot/_internal/plugins/manual.py b/certbot/certbot/_internal/plugins/manual.py\n--- a/certbot/certbot/_internal/plugins/manual.py\n+++ b/certbot/certbot/_internal/plugins/manual.py\n@@ -35,7 +35,11 @@\n 'is the validation string, and $CERTBOT_TOKEN is the filename of the '\n 'resource requested when performing an HTTP-01 challenge. An additional '\n 'cleanup script can also be provided and can use the additional variable '\n- '$CERTBOT_AUTH_OUTPUT which contains the stdout output from the auth script.')\n+ '$CERTBOT_AUTH_OUTPUT which contains the stdout output from the auth script.'\n+ 'For both authenticator and cleanup script, on HTTP-01 and DNS-01 challenges,'\n+ '$CERTBOT_REMAINING_CHALLENGES will be equal to the number of challenges that '\n+ 'remain after the current one, and $CERTBOT_ALL_DOMAINS contains a comma-separated '\n+ 'list of all domains that are challenged for the current certificate.')\n _DNS_INSTRUCTIONS = \"\"\"\\\n Please deploy a DNS TXT record under the name\n {domain} with the following value:\n@@ -109,14 +113,13 @@\n \n def perform(self, achalls): # pylint: disable=missing-function-docstring\n self._verify_ip_logging_ok()\n- if self.conf('auth-hook'):\n- perform_achall = self._perform_achall_with_script\n- else:\n- perform_achall = self._perform_achall_manually\n \n responses = []\n for achall in achalls:\n- perform_achall(achall)\n+ if self.conf('auth-hook'):\n+ self._perform_achall_with_script(achall, achalls)\n+ else:\n+ self._perform_achall_manually(achall)\n responses.append(achall.response(achall.account_key))\n return responses\n \n@@ -134,9 +137,11 @@\n else:\n raise errors.PluginError('Must agree to IP logging to proceed')\n \n- def _perform_achall_with_script(self, achall):\n+ def _perform_achall_with_script(self, achall, achalls):\n env = dict(CERTBOT_DOMAIN=achall.domain,\n- CERTBOT_VALIDATION=achall.validation(achall.account_key))\n+ CERTBOT_VALIDATION=achall.validation(achall.account_key),\n+ CERTBOT_ALL_DOMAINS=','.join(one_achall.domain for one_achall in achalls),\n+ CERTBOT_REMAINING_CHALLENGES=str(len(achalls) - achalls.index(achall) - 1))\n if isinstance(achall.chall, challenges.HTTP01):\n env['CERTBOT_TOKEN'] = achall.chall.encode('token')\n else:\n", "issue": "Feature request: Add option to know what is the last domain passed to dns hook script\nHi,\r\n\r\ncertbot doesn't provide an official hook to work with the API of my DNS provider so I'm using my own script. Well, we know every time we create/modify a record on our DNS provider it needs to propagate to all the authoritative DNS servers, in my case it takes 1 minute and 30 seconds approx., right now, as far as I know, there is no option to know what is the last domain that is being issued so to be bullet proof, every time the script creates the _acme_challenge TXT record, I wait 1:30 minutes, well I use the API to know when the propagation is finished but as I said, it takes 1:30 minutes. If I want to issue a certificate with 100 domains it will take approx. 2 hours and 30 minutes to complete the request. If I had a way to know what is the last domain I could write all the TXT records and when the last domain is created, I could add the check to know when the propagation has finished (or just a simple sleep...) 
so it takes only 1:30 minutes instead of 2 hours and 30 minutes.\r\n\r\nIt would be great if certbot provided a variable like `CERTBOT_LAST_DOMAIN=true` or something like that so the script could have the logic to sleep the right amount of time to let finish the DNS propagation or to perform the needed checks only when there are no more domains to add to our DNS provider.\r\n\r\nAs a workaround I pass the last domain to my script:\r\n\r\n`./certbot-auto certonly --manual --preferred-challenges=dns --manual-auth-hook \"/path/to/hook/dnsapi.sh yyy.domain.tld\" --manual-cleanup-hook /path/to/hook/dnsapi-cleanup.sh -d zzz.domain.tld,aaa.domain.tld,yyy.domain.tld`\r\n\r\nand it works perfectly but would be great to have this feature added directly to certbot.\r\n\r\nThank you in advance.\r\n\r\nCheers,\r\nsahsanu\n", "before_files": [{"content": "\"\"\"Manual authenticator plugin\"\"\"\nimport zope.component\nimport zope.interface\n\nfrom acme import challenges\nfrom acme.magic_typing import Dict\nfrom certbot import achallenges # pylint: disable=unused-import\nfrom certbot import errors\nfrom certbot import interfaces\nfrom certbot import reverter\nfrom certbot._internal import hooks\nfrom certbot.compat import os\nfrom certbot.plugins import common\n\n\[email protected](interfaces.IAuthenticator)\[email protected](interfaces.IPluginFactory)\nclass Authenticator(common.Plugin):\n \"\"\"Manual authenticator\n\n This plugin allows the user to perform the domain validation\n challenge(s) themselves. This either be done manually by the user or\n through shell scripts provided to Certbot.\n\n \"\"\"\n\n description = 'Manual configuration or run your own shell scripts'\n hidden = True\n long_description = (\n 'Authenticate through manual configuration or custom shell scripts. '\n 'When using shell scripts, an authenticator script must be provided. '\n 'The environment variables available to this script depend on the '\n 'type of challenge. $CERTBOT_DOMAIN will always contain the domain '\n 'being authenticated. For HTTP-01 and DNS-01, $CERTBOT_VALIDATION '\n 'is the validation string, and $CERTBOT_TOKEN is the filename of the '\n 'resource requested when performing an HTTP-01 challenge. An additional '\n 'cleanup script can also be provided and can use the additional variable '\n '$CERTBOT_AUTH_OUTPUT which contains the stdout output from the auth script.')\n _DNS_INSTRUCTIONS = \"\"\"\\\nPlease deploy a DNS TXT record under the name\n{domain} with the following value:\n\n{validation}\n\nBefore continuing, verify the record is deployed.\"\"\"\n _HTTP_INSTRUCTIONS = \"\"\"\\\nCreate a file containing just this data:\n\n{validation}\n\nAnd make it available on your web server at this URL:\n\n{uri}\n\"\"\"\n _SUBSEQUENT_CHALLENGE_INSTRUCTIONS = \"\"\"\n(This must be set up in addition to the previous challenges; do not remove,\nreplace, or undo the previous challenge tasks yet.)\n\"\"\"\n _SUBSEQUENT_DNS_CHALLENGE_INSTRUCTIONS = \"\"\"\n(This must be set up in addition to the previous challenges; do not remove,\nreplace, or undo the previous challenge tasks yet. Note that you might be\nasked to create multiple distinct TXT records with the same name. 
This is\npermitted by DNS standards.)\n\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self.reverter = reverter.Reverter(self.config)\n self.reverter.recovery_routine()\n self.env = dict() \\\n # type: Dict[achallenges.KeyAuthorizationAnnotatedChallenge, Dict[str, str]]\n self.subsequent_dns_challenge = False\n self.subsequent_any_challenge = False\n\n @classmethod\n def add_parser_arguments(cls, add):\n add('auth-hook',\n help='Path or command to execute for the authentication script')\n add('cleanup-hook',\n help='Path or command to execute for the cleanup script')\n add('public-ip-logging-ok', action='store_true',\n help='Automatically allows public IP logging (default: Ask)')\n\n def prepare(self): # pylint: disable=missing-function-docstring\n if self.config.noninteractive_mode and not self.conf('auth-hook'):\n raise errors.PluginError(\n 'An authentication script must be provided with --{0} when '\n 'using the manual plugin non-interactively.'.format(\n self.option_name('auth-hook')))\n self._validate_hooks()\n\n def _validate_hooks(self):\n if self.config.validate_hooks:\n for name in ('auth-hook', 'cleanup-hook'):\n hook = self.conf(name)\n if hook is not None:\n hook_prefix = self.option_name(name)[:-len('-hook')]\n hooks.validate_hook(hook, hook_prefix)\n\n def more_info(self): # pylint: disable=missing-function-docstring\n return (\n 'This plugin allows the user to customize setup for domain '\n 'validation challenges either through shell scripts provided by '\n 'the user or by performing the setup manually.')\n\n def get_chall_pref(self, domain):\n # pylint: disable=unused-argument,missing-function-docstring\n return [challenges.HTTP01, challenges.DNS01]\n\n def perform(self, achalls): # pylint: disable=missing-function-docstring\n self._verify_ip_logging_ok()\n if self.conf('auth-hook'):\n perform_achall = self._perform_achall_with_script\n else:\n perform_achall = self._perform_achall_manually\n\n responses = []\n for achall in achalls:\n perform_achall(achall)\n responses.append(achall.response(achall.account_key))\n return responses\n\n def _verify_ip_logging_ok(self):\n if not self.conf('public-ip-logging-ok'):\n cli_flag = '--{0}'.format(self.option_name('public-ip-logging-ok'))\n msg = ('NOTE: The IP of this machine will be publicly logged as '\n \"having requested this certificate. 
If you're running \"\n 'certbot in manual mode on a machine that is not your '\n \"server, please ensure you're okay with that.\\n\\n\"\n 'Are you OK with your IP being logged?')\n display = zope.component.getUtility(interfaces.IDisplay)\n if display.yesno(msg, cli_flag=cli_flag, force_interactive=True):\n setattr(self.config, self.dest('public-ip-logging-ok'), True)\n else:\n raise errors.PluginError('Must agree to IP logging to proceed')\n\n def _perform_achall_with_script(self, achall):\n env = dict(CERTBOT_DOMAIN=achall.domain,\n CERTBOT_VALIDATION=achall.validation(achall.account_key))\n if isinstance(achall.chall, challenges.HTTP01):\n env['CERTBOT_TOKEN'] = achall.chall.encode('token')\n else:\n os.environ.pop('CERTBOT_TOKEN', None)\n os.environ.update(env)\n _, out = self._execute_hook('auth-hook')\n env['CERTBOT_AUTH_OUTPUT'] = out.strip()\n self.env[achall] = env\n\n def _perform_achall_manually(self, achall):\n validation = achall.validation(achall.account_key)\n if isinstance(achall.chall, challenges.HTTP01):\n msg = self._HTTP_INSTRUCTIONS.format(\n achall=achall, encoded_token=achall.chall.encode('token'),\n port=self.config.http01_port,\n uri=achall.chall.uri(achall.domain), validation=validation)\n else:\n assert isinstance(achall.chall, challenges.DNS01)\n msg = self._DNS_INSTRUCTIONS.format(\n domain=achall.validation_domain_name(achall.domain),\n validation=validation)\n if isinstance(achall.chall, challenges.DNS01):\n if self.subsequent_dns_challenge:\n # 2nd or later dns-01 challenge\n msg += self._SUBSEQUENT_DNS_CHALLENGE_INSTRUCTIONS\n self.subsequent_dns_challenge = True\n elif self.subsequent_any_challenge:\n # 2nd or later challenge of another type\n msg += self._SUBSEQUENT_CHALLENGE_INSTRUCTIONS\n display = zope.component.getUtility(interfaces.IDisplay)\n display.notification(msg, wrap=False, force_interactive=True)\n self.subsequent_any_challenge = True\n\n def cleanup(self, achalls): # pylint: disable=missing-function-docstring\n if self.conf('cleanup-hook'):\n for achall in achalls:\n env = self.env.pop(achall)\n if 'CERTBOT_TOKEN' not in env:\n os.environ.pop('CERTBOT_TOKEN', None)\n os.environ.update(env)\n self._execute_hook('cleanup-hook')\n self.reverter.recovery_routine()\n\n def _execute_hook(self, hook_name):\n return hooks.execute(self.option_name(hook_name), self.conf(hook_name))\n", "path": "certbot/certbot/_internal/plugins/manual.py"}]} | 3,167 | 627 |
gh_patches_debug_11237 | rasdani/github-patches | git_diff | Mailu__Mailu-1183 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mailu 1.7 : Issue when moving mail to the Junk folder
When I move an email to the Junk folder, I see the following in the logs:
imap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu
imap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu
imap_1 | Sep 22 16:51:26 imap: Error: tee: /dev/fd/63: I/O error
imap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: program exec:/conf/bin/spam (27655): Terminated with non-zero exit code 1
imap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: sieve: left message in mailbox 'Junk'
</issue>
<code>
[start of core/dovecot/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import multiprocessing
6 import logging as log
7 import sys
8
9 from podop import run_server
10 from socrate import system, conf
11
12 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
13
14 def start_podop():
15 os.setuid(8)
16 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
17 run_server(0, "dovecot", "/tmp/podop.socket", [
18 ("quota", "url", url ),
19 ("auth", "url", url),
20 ("sieve", "url", url),
21 ])
22
23 # Actual startup script
24
25 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
26 os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis")
27 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
28 os.environ["ANTISPAM_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM", "antispam:11334")
29 if os.environ["WEBMAIL"] != "none":
30 os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail")
31
32 for dovecot_file in glob.glob("/conf/*.conf"):
33 conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
34
35 # Run Podop, then postfix
36 multiprocessing.Process(target=start_podop).start()
37 os.system("chown mail:mail /mail")
38 os.system("chown -R mail:mail /var/lib/dovecot /conf")
39 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
40
[end of core/dovecot/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/dovecot/start.py b/core/dovecot/start.py
--- a/core/dovecot/start.py
+++ b/core/dovecot/start.py
@@ -32,6 +32,12 @@
for dovecot_file in glob.glob("/conf/*.conf"):
conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
+os.makedirs("/conf/bin", exist_ok=True)
+for script_file in glob.glob("/conf/*.script"):
+ out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script',''))
+ conf.jinja(script_file, os.environ, out_file)
+ os.chmod(out_file, 0o555)
+
# Run Podop, then postfix
multiprocessing.Process(target=start_podop).start()
os.system("chown mail:mail /mail")
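The added block templates every `/conf/*.script` file into an executable under `/conf/bin/`, which is the path the sieve pipe in the log (`/conf/bin/spam`) expects. A one-file illustration of the name mapping it performs; the input file name is assumed:

```python
# Illustration of the filename mapping performed by the added lines.
import os

script_file = "/conf/spam.script"        # assumed template name
out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace(".script", ""))
print(out_file)                          # /conf/bin/spam
```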
| {"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -32,6 +32,12 @@\n for dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n \n+os.makedirs(\"/conf/bin\", exist_ok=True)\n+for script_file in glob.glob(\"/conf/*.script\"):\n+ out_file = os.path.join(\"/conf/bin/\", os.path.basename(script_file).replace('.script',''))\n+ conf.jinja(script_file, os.environ, out_file)\n+ os.chmod(out_file, 0o555)\n+\n # Run Podop, then postfix\n multiprocessing.Process(target=start_podop).start()\n os.system(\"chown mail:mail /mail\")\n", "issue": "Mailu 1.7 : Issue when moving mail to the Junk folder\nWhen I move a mail to the Junk Folder, i see the following in the logs:\r\nimap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu\r\nimap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu\r\nimap_1 | Sep 22 16:51:26 imap: Error: tee: /dev/fd/63: I/O error\r\nimap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: program exec:/conf/bin/spam (27655): Terminated with non-zero exit code 1\r\nimap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: sieve: left message in mailbox 'Junk'\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(8)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\n\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"REDIS_ADDRESS\"] = system.get_host_address_from_environment(\"REDIS\", \"redis\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM\", \"antispam:11334\")\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.get_host_address_from_environment(\"WEBMAIL\", \"webmail\")\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}]} | 1,257 | 196 |
gh_patches_debug_36986 | rasdani/github-patches | git_diff | encode__httpx-167 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
non-ASCII characters filename in multipart/form-data
I tried to send a file with Russian letters in the file name, and `httpx` escaped all non-ASCII characters (as I understand it, in the way specified in [RFC 7578](https://tools.ietf.org/html/rfc7578)). This differs from the `requests` [behavior](https://github.com/psf/requests/blob/589a82256759018a7e5e289302898dae32544949/requests/models.py#L110), where the file name is simply written as is (using [`RequestField.make_multipart`](https://github.com/urllib3/urllib3/blob/f0d9ebc41e51c4c4c9990b1eed02d297fd1b20d8/src/urllib3/fields.py#L248) from `urllib3`). Maybe `httpx` should just change [`FileField.render_headers`](https://github.com/encode/httpx/blob/3ba2e8c328f05b07ee06ae40f99dce7fe1e8292c/httpx/multipart.py#L57) so that it does not escape the file name and instead writes it as raw bytes?
```diff
def render_headers(self) -> bytes:
- name = quote(self.name, encoding="utf-8").encode("ascii")
- filename = quote(self.filename, encoding="utf-8").encode("ascii")
- content_type = self.content_type.encode("ascii")
return b"".join(
[
b'Content-Disposition: form-data; name="',
- name,
+ self.name.encode(),
b'"; filename="',
- filename,
+ self.filename.encode(),
b'"\r\n',
b"Content-Type: ",
- content_type,
+ self.content_type.encode(),
b"\r\n",
]
)
```
</issue>
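A quick standard-library illustration of the difference being described: the percent-escaped form is what the quoting shown above produces, while the raw bytes are what `requests` would write. The Cyrillic filename is just an example:

```python
from urllib.parse import quote

filename = "отчёт.txt"
print(quote(filename, encoding="utf-8"))   # %D0%BE%D1%82%D1%87%D1%91%D1%82.txt
print(filename.encode("utf-8"))            # b'\xd0\xbe\xd1\x82\xd1\x87\xd1\x91\xd1\x82.txt'
```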
<code>
[start of httpx/multipart.py]
1 import binascii
2 import mimetypes
3 import os
4 import typing
5 from io import BytesIO
6 from urllib.parse import quote
7
8
9 class Field:
10 def render_headers(self) -> bytes:
11 raise NotImplementedError() # pragma: nocover
12
13 def render_data(self) -> bytes:
14 raise NotImplementedError() # pragma: nocover
15
16
17 class DataField(Field):
18 def __init__(self, name: str, value: typing.Union[str, bytes]) -> None:
19 if not isinstance(name, str):
20 raise TypeError("Invalid type for name. Expected str.")
21 if not isinstance(value, (str, bytes)):
22 raise TypeError("Invalid type for value. Expected str or bytes.")
23 self.name = name
24 self.value = value
25
26 def render_headers(self) -> bytes:
27 name = quote(self.name, encoding="utf-8").encode("ascii")
28 return b"".join(
29 [b'Content-Disposition: form-data; name="', name, b'"\r\n' b"\r\n"]
30 )
31
32 def render_data(self) -> bytes:
33 return (
34 self.value if isinstance(self.value, bytes) else self.value.encode("utf-8")
35 )
36
37
38 class FileField(Field):
39 def __init__(
40 self, name: str, value: typing.Union[typing.IO[typing.AnyStr], tuple]
41 ) -> None:
42 self.name = name
43 if not isinstance(value, tuple):
44 self.filename = os.path.basename(getattr(value, "name", "upload"))
45 self.file = value # type: typing.Union[typing.IO[str], typing.IO[bytes]]
46 self.content_type = self.guess_content_type()
47 else:
48 self.filename = value[0]
49 self.file = value[1]
50 self.content_type = (
51 value[2] if len(value) > 2 else self.guess_content_type()
52 )
53
54 def guess_content_type(self) -> str:
55 return mimetypes.guess_type(self.filename)[0] or "application/octet-stream"
56
57 def render_headers(self) -> bytes:
58 name = quote(self.name, encoding="utf-8").encode("ascii")
59 filename = quote(self.filename, encoding="utf-8").encode("ascii")
60 content_type = self.content_type.encode("ascii")
61 return b"".join(
62 [
63 b'Content-Disposition: form-data; name="',
64 name,
65 b'"; filename="',
66 filename,
67 b'"\r\n',
68 b"Content-Type: ",
69 content_type,
70 b"\r\n",
71 b"\r\n",
72 ]
73 )
74
75 def render_data(self) -> bytes:
76 content = self.file.read()
77 return content.encode("utf-8") if isinstance(content, str) else content
78
79
80 def iter_fields(data: dict, files: dict) -> typing.Iterator[Field]:
81 for name, value in data.items():
82 if isinstance(value, (list, dict)):
83 for item in value:
84 yield DataField(name=name, value=item)
85 else:
86 yield DataField(name=name, value=value)
87
88 for name, value in files.items():
89 yield FileField(name=name, value=value)
90
91
92 def multipart_encode(data: dict, files: dict) -> typing.Tuple[bytes, str]:
93 body = BytesIO()
94 boundary = binascii.hexlify(os.urandom(16))
95
96 for field in iter_fields(data, files):
97 body.write(b"--%s\r\n" % boundary)
98 body.write(field.render_headers())
99 body.write(field.render_data())
100 body.write(b"\r\n")
101
102 body.write(b"--%s--\r\n" % boundary)
103
104 content_type = "multipart/form-data; boundary=%s" % boundary.decode("ascii")
105
106 return body.getvalue(), content_type
107
[end of httpx/multipart.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/httpx/multipart.py b/httpx/multipart.py
--- a/httpx/multipart.py
+++ b/httpx/multipart.py
@@ -1,9 +1,17 @@
import binascii
import mimetypes
import os
+import re
import typing
from io import BytesIO
-from urllib.parse import quote
+
+_HTML5_FORM_ENCODING_REPLACEMENTS = {'"': "%22", "\\": "\\\\"}
+_HTML5_FORM_ENCODING_REPLACEMENTS.update(
+ {chr(c): "%{:02X}".format(c) for c in range(0x00, 0x1F + 1) if c != 0x1B}
+)
+_HTML5_FORM_ENCODING_RE = re.compile(
+ r"|".join([re.escape(c) for c in _HTML5_FORM_ENCODING_REPLACEMENTS.keys()])
+)
class Field:
@@ -24,10 +32,8 @@
self.value = value
def render_headers(self) -> bytes:
- name = quote(self.name, encoding="utf-8").encode("ascii")
- return b"".join(
- [b'Content-Disposition: form-data; name="', name, b'"\r\n' b"\r\n"]
- )
+ name = _format_param("name", self.name)
+ return b"".join([b"Content-Disposition: form-data; ", name, b"\r\n\r\n"])
def render_data(self) -> bytes:
return (
@@ -55,20 +61,18 @@
return mimetypes.guess_type(self.filename)[0] or "application/octet-stream"
def render_headers(self) -> bytes:
- name = quote(self.name, encoding="utf-8").encode("ascii")
- filename = quote(self.filename, encoding="utf-8").encode("ascii")
- content_type = self.content_type.encode("ascii")
+ name = _format_param("name", self.name)
+ filename = _format_param("filename", self.filename)
+ content_type = self.content_type.encode()
return b"".join(
[
- b'Content-Disposition: form-data; name="',
+ b"Content-Disposition: form-data; ",
name,
- b'"; filename="',
+ b"; ",
filename,
- b'"\r\n',
- b"Content-Type: ",
+ b"\r\nContent-Type: ",
content_type,
- b"\r\n",
- b"\r\n",
+ b"\r\n\r\n",
]
)
@@ -104,3 +108,14 @@
content_type = "multipart/form-data; boundary=%s" % boundary.decode("ascii")
return body.getvalue(), content_type
+
+
+def _format_param(name: str, value: typing.Union[str, bytes]) -> bytes:
+ if isinstance(value, bytes):
+ value = value.decode()
+
+ def replacer(match: typing.Match[str]) -> str:
+ return _HTML5_FORM_ENCODING_REPLACEMENTS[match.group(0)]
+
+ value = _HTML5_FORM_ENCODING_RE.sub(replacer, value)
+ return f'{name}="{value}"'.encode()
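To see what the new helper emits, here is a self-contained re-run of the `_format_param` logic from the diff on a filename that mixes quotes and non-ASCII characters; the sample name is arbitrary:

```python
import re

replacements = {'"': "%22", "\\": "\\\\"}
replacements.update({chr(c): "%{:02X}".format(c) for c in range(0x00, 0x1F + 1) if c != 0x1B})
pattern = re.compile(r"|".join(re.escape(c) for c in replacements))

def format_param(name: str, value: str) -> bytes:
    # Same substitution as _format_param in the diff above.
    value = pattern.sub(lambda m: replacements[m.group(0)], value)
    return f'{name}="{value}"'.encode()

print(format_param("filename", 'отчёт ("final").txt'))
# b'filename="\xd0\xbe\xd1\x82\xd1\x87\xd1\x91\xd1\x82 (%22final%22).txt"'
```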
| {"golden_diff": "diff --git a/httpx/multipart.py b/httpx/multipart.py\n--- a/httpx/multipart.py\n+++ b/httpx/multipart.py\n@@ -1,9 +1,17 @@\n import binascii\n import mimetypes\n import os\n+import re\n import typing\n from io import BytesIO\n-from urllib.parse import quote\n+\n+_HTML5_FORM_ENCODING_REPLACEMENTS = {'\"': \"%22\", \"\\\\\": \"\\\\\\\\\"}\n+_HTML5_FORM_ENCODING_REPLACEMENTS.update(\n+ {chr(c): \"%{:02X}\".format(c) for c in range(0x00, 0x1F + 1) if c != 0x1B}\n+)\n+_HTML5_FORM_ENCODING_RE = re.compile(\n+ r\"|\".join([re.escape(c) for c in _HTML5_FORM_ENCODING_REPLACEMENTS.keys()])\n+)\n \n \n class Field:\n@@ -24,10 +32,8 @@\n self.value = value\n \n def render_headers(self) -> bytes:\n- name = quote(self.name, encoding=\"utf-8\").encode(\"ascii\")\n- return b\"\".join(\n- [b'Content-Disposition: form-data; name=\"', name, b'\"\\r\\n' b\"\\r\\n\"]\n- )\n+ name = _format_param(\"name\", self.name)\n+ return b\"\".join([b\"Content-Disposition: form-data; \", name, b\"\\r\\n\\r\\n\"])\n \n def render_data(self) -> bytes:\n return (\n@@ -55,20 +61,18 @@\n return mimetypes.guess_type(self.filename)[0] or \"application/octet-stream\"\n \n def render_headers(self) -> bytes:\n- name = quote(self.name, encoding=\"utf-8\").encode(\"ascii\")\n- filename = quote(self.filename, encoding=\"utf-8\").encode(\"ascii\")\n- content_type = self.content_type.encode(\"ascii\")\n+ name = _format_param(\"name\", self.name)\n+ filename = _format_param(\"filename\", self.filename)\n+ content_type = self.content_type.encode()\n return b\"\".join(\n [\n- b'Content-Disposition: form-data; name=\"',\n+ b\"Content-Disposition: form-data; \",\n name,\n- b'\"; filename=\"',\n+ b\"; \",\n filename,\n- b'\"\\r\\n',\n- b\"Content-Type: \",\n+ b\"\\r\\nContent-Type: \",\n content_type,\n- b\"\\r\\n\",\n- b\"\\r\\n\",\n+ b\"\\r\\n\\r\\n\",\n ]\n )\n \n@@ -104,3 +108,14 @@\n content_type = \"multipart/form-data; boundary=%s\" % boundary.decode(\"ascii\")\n \n return body.getvalue(), content_type\n+\n+\n+def _format_param(name: str, value: typing.Union[str, bytes]) -> bytes:\n+ if isinstance(value, bytes):\n+ value = value.decode()\n+ \n+ def replacer(match: typing.Match[str]) -> str:\n+ return _HTML5_FORM_ENCODING_REPLACEMENTS[match.group(0)]\n+\n+ value = _HTML5_FORM_ENCODING_RE.sub(replacer, value)\n+ return f'{name}=\"{value}\"'.encode()\n", "issue": "non-ASCII characters filename in multipart/form-data\nI tried to send a file with russian letters in the file name, and `httpx` escaped all non-ASCII characters (as I understood it the way specified in [RFC 7578](https://tools.ietf.org/html/rfc7578)). But this is different from `requests` [behavior](https://github.com/psf/requests/blob/589a82256759018a7e5e289302898dae32544949/requests/models.py#L110), where the file name is simply written as is (using [`RequestField.make_multipart`](https://github.com/urllib3/urllib3/blob/f0d9ebc41e51c4c4c9990b1eed02d297fd1b20d8/src/urllib3/fields.py#L248) from `urllib3`). 
Maybe `httpx` should just change [`FileField.render_headers`](https://github.com/encode/httpx/blob/3ba2e8c328f05b07ee06ae40f99dce7fe1e8292c/httpx/multipart.py#L57), so as not to escape the file name and instead just put it as bytes?\r\n\r\n```diff\r\ndef render_headers(self) -> bytes:\r\n- name = quote(self.name, encoding=\"utf-8\").encode(\"ascii\")\r\n- filename = quote(self.filename, encoding=\"utf-8\").encode(\"ascii\")\r\n- content_type = self.content_type.encode(\"ascii\")\r\n return b\"\".join(\r\n [\r\n b'Content-Disposition: form-data; name=\"',\r\n- name,\r\n+ self.name.encode(),\r\n b'\"; filename=\"',\r\n- filename,\r\n+ self.filename.encode(),\r\n b'\"\\r\\n',\r\n b\"Content-Type: \",\r\n- content_type,\r\n+ self.content_type.encode(),\r\n b\"\\r\\n\",\r\n ]\r\n )\r\n```\n", "before_files": [{"content": "import binascii\nimport mimetypes\nimport os\nimport typing\nfrom io import BytesIO\nfrom urllib.parse import quote\n\n\nclass Field:\n def render_headers(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n def render_data(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n\nclass DataField(Field):\n def __init__(self, name: str, value: typing.Union[str, bytes]) -> None:\n if not isinstance(name, str):\n raise TypeError(\"Invalid type for name. Expected str.\")\n if not isinstance(value, (str, bytes)):\n raise TypeError(\"Invalid type for value. Expected str or bytes.\")\n self.name = name\n self.value = value\n\n def render_headers(self) -> bytes:\n name = quote(self.name, encoding=\"utf-8\").encode(\"ascii\")\n return b\"\".join(\n [b'Content-Disposition: form-data; name=\"', name, b'\"\\r\\n' b\"\\r\\n\"]\n )\n\n def render_data(self) -> bytes:\n return (\n self.value if isinstance(self.value, bytes) else self.value.encode(\"utf-8\")\n )\n\n\nclass FileField(Field):\n def __init__(\n self, name: str, value: typing.Union[typing.IO[typing.AnyStr], tuple]\n ) -> None:\n self.name = name\n if not isinstance(value, tuple):\n self.filename = os.path.basename(getattr(value, \"name\", \"upload\"))\n self.file = value # type: typing.Union[typing.IO[str], typing.IO[bytes]]\n self.content_type = self.guess_content_type()\n else:\n self.filename = value[0]\n self.file = value[1]\n self.content_type = (\n value[2] if len(value) > 2 else self.guess_content_type()\n )\n\n def guess_content_type(self) -> str:\n return mimetypes.guess_type(self.filename)[0] or \"application/octet-stream\"\n\n def render_headers(self) -> bytes:\n name = quote(self.name, encoding=\"utf-8\").encode(\"ascii\")\n filename = quote(self.filename, encoding=\"utf-8\").encode(\"ascii\")\n content_type = self.content_type.encode(\"ascii\")\n return b\"\".join(\n [\n b'Content-Disposition: form-data; name=\"',\n name,\n b'\"; filename=\"',\n filename,\n b'\"\\r\\n',\n b\"Content-Type: \",\n content_type,\n b\"\\r\\n\",\n b\"\\r\\n\",\n ]\n )\n\n def render_data(self) -> bytes:\n content = self.file.read()\n return content.encode(\"utf-8\") if isinstance(content, str) else content\n\n\ndef iter_fields(data: dict, files: dict) -> typing.Iterator[Field]:\n for name, value in data.items():\n if isinstance(value, (list, dict)):\n for item in value:\n yield DataField(name=name, value=item)\n else:\n yield DataField(name=name, value=value)\n\n for name, value in files.items():\n yield FileField(name=name, value=value)\n\n\ndef multipart_encode(data: dict, files: dict) -> typing.Tuple[bytes, str]:\n body = BytesIO()\n boundary = binascii.hexlify(os.urandom(16))\n\n for field in iter_fields(data, 
files):\n body.write(b\"--%s\\r\\n\" % boundary)\n body.write(field.render_headers())\n body.write(field.render_data())\n body.write(b\"\\r\\n\")\n\n body.write(b\"--%s--\\r\\n\" % boundary)\n\n content_type = \"multipart/form-data; boundary=%s\" % boundary.decode(\"ascii\")\n\n return body.getvalue(), content_type\n", "path": "httpx/multipart.py"}]} | 2,009 | 708 |
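For context, the patch above drops `urllib.parse.quote` in favor of an HTML5 form-encoding scheme for `Content-Disposition` parameters. Below is a minimal, self-contained sketch of that behavior; it mirrors the `_format_param` helper and the replacement map from the diff, and the example filenames are hypothetical, chosen only to show that quotes and control characters get percent-encoded while non-ASCII characters pass through untouched.

```python
# Sketch only (not part of the dataset row): mirrors the _format_param helper
# and the HTML5 form-encoding map introduced by the patch above.
import re
import typing

_HTML5_FORM_ENCODING_REPLACEMENTS = {'"': "%22", "\\": "\\\\"}
_HTML5_FORM_ENCODING_REPLACEMENTS.update(
    {chr(c): "%{:02X}".format(c) for c in range(0x00, 0x1F + 1) if c != 0x1B}
)
_HTML5_FORM_ENCODING_RE = re.compile(
    r"|".join([re.escape(c) for c in _HTML5_FORM_ENCODING_REPLACEMENTS.keys()])
)


def _format_param(name: str, value: typing.Union[str, bytes]) -> bytes:
    # Decode bytes input so the regex substitution operates on str.
    if isinstance(value, bytes):
        value = value.decode()

    def replacer(match: re.Match) -> str:
        return _HTML5_FORM_ENCODING_REPLACEMENTS[match.group(0)]

    value = _HTML5_FORM_ENCODING_RE.sub(replacer, value)
    return f'{name}="{value}"'.encode()


# Hypothetical example filenames:
print(_format_param("filename", "отчёт.txt"))  # UTF-8 bytes; Cyrillic characters are not percent-escaped
print(_format_param("filename", 'a"b.txt'))    # b'filename="a%22b.txt"'
```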